diff --git "a/val.csv" "b/val.csv" --- "a/val.csv" +++ "b/val.csv" @@ -1,140433 +1,3 @@ -hash,date,author,commit_message,is_merge,git_diff,type,masked_commit_message -e8e252376813264d2ee75b3ecef022471f6d6bf4,2022-10-05 15:07:53,Periklis Tsirakidis,operator: Use quayio v2.7.0-pre image for openshift overlay (#7329),False,"diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml -index 2315967ded340..320afd11b71ed 100644 ---- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml -+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml -@@ -1199,7 +1199,7 @@ spec: - - /manager - env: - - name: RELATED_IMAGE_LOKI -- value: docker.io/grafana/loki:main-ec0bf70 -+ value: quay.io/openshift-logging/loki:v2.7.0-pre - - name: RELATED_IMAGE_GATEWAY - value: quay.io/observatorium/api:latest - - name: RELATED_IMAGE_OPA -@@ -1327,7 +1327,7 @@ spec: - provider: - name: Grafana.com - relatedImages: -- - image: docker.io/grafana/loki:main-ec0bf70 -+ - image: quay.io/openshift-logging/loki:v2.7.0-pre - name: loki - - image: quay.io/observatorium/api:latest - name: gateway -diff --git a/operator/config/overlays/openshift/manager_related_image_patch.yaml b/operator/config/overlays/openshift/manager_related_image_patch.yaml -index 589a8610ee434..8d1b4495102f8 100644 ---- a/operator/config/overlays/openshift/manager_related_image_patch.yaml -+++ b/operator/config/overlays/openshift/manager_related_image_patch.yaml -@@ -9,7 +9,7 @@ spec: - - name: manager - env: - - name: RELATED_IMAGE_LOKI -- value: docker.io/grafana/loki:main-ec0bf70 -+ value: quay.io/openshift-logging/loki:v2.7.0-pre - - name: RELATED_IMAGE_GATEWAY - value: quay.io/observatorium/api:latest - - name: RELATED_IMAGE_OPA",operator,Use quayio v2.7.0-pre image for openshift overlay (#7329) -706c22e9e40b0156031f214b63dc6ed4e210abc1,2022-11-10 19:39:30,Jasper,"Loki: Add querier config to loki helm (#7627) - -add the ability to update querier config using `values.yaml` file",False,"diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md -index d8710129b6d32..0c64710293a98 100644 ---- a/docs/sources/installation/helm/reference.md -+++ b/docs/sources/installation/helm/reference.md -@@ -1312,6 +1312,15 @@ null - ""runAsUser"": 10001 - } - -+ -+ -+ -+ loki.querier -+ object -+ Optional querier configuration -+
-+{}
-+
- - - -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index ba474e79ac67d..48b1d58113a73 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -4,7 +4,7 @@ name: loki - description: Helm chart for Grafana Loki in simple, scalable mode - type: application - appVersion: 2.6.1 --version: 3.3.3 -+version: 3.3.4 - home: https://grafana.github.io/helm-charts - sources: - - https://github.com/grafana/loki -diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md -index 910dfa49e6a57..e0cfe25733cac 100644 ---- a/production/helm/loki/README.md -+++ b/production/helm/loki/README.md -@@ -1,6 +1,6 @@ - # loki - --![Version: 3.3.3](https://img.shields.io/badge/Version-3.3.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.1](https://img.shields.io/badge/AppVersion-2.6.1-informational?style=flat-square) -+![Version: 3.3.4](https://img.shields.io/badge/Version-3.3.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.1](https://img.shields.io/badge/AppVersion-2.6.1-informational?style=flat-square) - - Helm chart for Grafana Loki in simple, scalable mode - -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index 18f95051a6456..2dbfb65308ac6 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -173,6 +173,11 @@ loki: - {{- tpl (. | toYaml) $ | nindent 4 }} - {{- end }} - -+ {{- with .Values.loki.querier }} -+ querier: -+ {{- tpl (. | toYaml) $ | nindent 4 }} -+ {{- end }} -+ - # Should authentication be enabled - auth_enabled: true - -@@ -259,6 +264,9 @@ loki: - # -- Optional analytics configuration - analytics: {} - -+ # -- Optional querier configuration -+ querier: {} -+ - enterprise: - # Enable enterprise features, license must be provided - enabled: false",Loki,"Add querier config to loki helm (#7627) - -add the ability to update querier config using `values.yaml` file" -3c47735deabab6fcdc7fad9bb15016dcefa0d692,2021-06-03 14:23:30,Michel Hollands,"Add a QueryFrontendTripperware module (#3792) - -Signed-off-by: Michel Hollands ",False,"diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go -index efcf230d78a1e..f3964238d254a 100644 ---- a/pkg/loki/loki.go -+++ b/pkg/loki/loki.go -@@ -366,6 +366,7 @@ func (t *Loki) setupModuleManager() error { - mm.RegisterModule(Ingester, t.initIngester) - mm.RegisterModule(Querier, t.initQuerier) - mm.RegisterModule(IngesterQuerier, t.initIngesterQuerier) -+ mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendTripperware, modules.UserInvisibleModule) - mm.RegisterModule(QueryFrontend, t.initQueryFrontend) - mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule) - mm.RegisterModule(Ruler, t.initRuler) -@@ -375,19 +376,20 @@ func (t *Loki) setupModuleManager() error { - - // Add dependencies - deps := map[string][]string{ -- Ring: {RuntimeConfig, Server, MemberlistKV}, -- Overrides: {RuntimeConfig}, -- TenantConfigs: {RuntimeConfig}, -- Distributor: {Ring, Server, Overrides, TenantConfigs}, -- Store: {Overrides}, -- Ingester: {Store, Server, MemberlistKV, TenantConfigs}, -- Querier: {Store, Ring, Server, IngesterQuerier, TenantConfigs}, -- QueryFrontend: {Server, Overrides, TenantConfigs}, -- Ruler: {Ring, Server, Store, RulerStorage, IngesterQuerier, Overrides, TenantConfigs}, -- 
TableManager: {Server}, -- Compactor: {Server, Overrides}, -- IngesterQuerier: {Ring}, -- All: {Querier, Ingester, Distributor, TableManager, Ruler}, -+ Ring: {RuntimeConfig, Server, MemberlistKV}, -+ Overrides: {RuntimeConfig}, -+ TenantConfigs: {RuntimeConfig}, -+ Distributor: {Ring, Server, Overrides, TenantConfigs}, -+ Store: {Overrides}, -+ Ingester: {Store, Server, MemberlistKV, TenantConfigs}, -+ Querier: {Store, Ring, Server, IngesterQuerier, TenantConfigs}, -+ QueryFrontendTripperware: {Server, Overrides, TenantConfigs}, -+ QueryFrontend: {QueryFrontendTripperware}, -+ Ruler: {Ring, Server, Store, RulerStorage, IngesterQuerier, Overrides, TenantConfigs}, -+ TableManager: {Server}, -+ Compactor: {Server, Overrides}, -+ IngesterQuerier: {Ring}, -+ All: {Querier, Ingester, Distributor, TableManager, Ruler}, - } - - // Add IngesterQuerier as a dependency for store when target is either ingester or querier. -diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index 28beb64bcbb54..6a04381da06e7 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -58,23 +58,24 @@ const maxChunkAgeForTableManager = 12 * time.Hour - - // The various modules that make up Loki. - const ( -- Ring string = ""ring"" -- RuntimeConfig string = ""runtime-config"" -- Overrides string = ""overrides"" -- TenantConfigs string = ""tenant-configs"" -- Server string = ""server"" -- Distributor string = ""distributor"" -- Ingester string = ""ingester"" -- Querier string = ""querier"" -- IngesterQuerier string = ""ingester-querier"" -- QueryFrontend string = ""query-frontend"" -- RulerStorage string = ""ruler-storage"" -- Ruler string = ""ruler"" -- Store string = ""store"" -- TableManager string = ""table-manager"" -- MemberlistKV string = ""memberlist-kv"" -- Compactor string = ""compactor"" -- All string = ""all"" -+ Ring string = ""ring"" -+ RuntimeConfig string = ""runtime-config"" -+ Overrides string = ""overrides"" -+ TenantConfigs string = ""tenant-configs"" -+ Server string = ""server"" -+ Distributor string = ""distributor"" -+ Ingester string = ""ingester"" -+ Querier string = ""querier"" -+ IngesterQuerier string = ""ingester-querier"" -+ QueryFrontend string = ""query-frontend"" -+ QueryFrontendTripperware string = ""query-frontend-tripperware"" -+ RulerStorage string = ""ruler-storage"" -+ Ruler string = ""ruler"" -+ Store string = ""store"" -+ TableManager string = ""table-manager"" -+ MemberlistKV string = ""memberlist-kv"" -+ Compactor string = ""compactor"" -+ All string = ""all"" - ) - - func (t *Loki) initServer() (services.Service, error) { -@@ -377,6 +378,26 @@ type disabledShuffleShardingLimits struct{} - - func (disabledShuffleShardingLimits) MaxQueriersPerUser(userID string) int { return 0 } - -+func (t *Loki) initQueryFrontendTripperware() (_ services.Service, err error) { -+ level.Debug(util_log.Logger).Log(""msg"", ""initializing query frontend tripperware"") -+ -+ tripperware, stopper, err := queryrange.NewTripperware( -+ t.Cfg.QueryRange, -+ util_log.Logger, -+ t.overrides, -+ t.Cfg.SchemaConfig.SchemaConfig, -+ t.Cfg.Querier.QueryIngestersWithin, -+ prometheus.DefaultRegisterer, -+ ) -+ if err != nil { -+ return -+ } -+ t.stopper = stopper -+ t.QueryFrontEndTripperware = tripperware -+ -+ return services.NewIdleService(nil, nil), nil -+} -+ - func (t *Loki) initQueryFrontend() (_ services.Service, err error) { - level.Debug(util_log.Logger).Log(""msg"", ""initializing query frontend"", ""config"", fmt.Sprintf(""%+v"", t.Cfg.Frontend)) - -@@ -394,27 +415,7 @@ func (t *Loki) 
initQueryFrontend() (_ services.Service, err error) { - frontendv1pb.RegisterFrontendServer(t.Server.GRPC, t.frontend) - } - -- level.Debug(util_log.Logger).Log(""msg"", ""initializing query range tripperware"", -- ""config"", fmt.Sprintf(""%+v"", t.Cfg.QueryRange), -- ""limits"", fmt.Sprintf(""%+v"", t.Cfg.LimitsConfig), -- ) -- tripperware, stopper, err := queryrange.NewTripperware( -- t.Cfg.QueryRange, -- util_log.Logger, -- t.overrides, -- t.Cfg.SchemaConfig.SchemaConfig, -- t.Cfg.Querier.QueryIngestersWithin, -- prometheus.DefaultRegisterer, -- ) -- if err != nil { -- return -- } -- t.stopper = stopper -- -- roundTripper = tripperware(roundTripper) -- if t.QueryFrontEndTripperware != nil { -- roundTripper = t.QueryFrontEndTripperware(roundTripper) -- } -+ roundTripper = t.QueryFrontEndTripperware(roundTripper) - - frontendHandler := transport.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util_log.Logger, prometheus.DefaultRegisterer) - if t.Cfg.Frontend.CompressResponses {",unknown,"Add a QueryFrontendTripperware module (#3792) - -Signed-off-by: Michel Hollands " -12c7eab8bb94fd82b184c1c222200e37f2ca050a,2020-05-20 21:08:32,Ed Welch,"Prep 1.5.0 release (#2098) - -* Updating the Changelog and Upgrade guide for 1.5.0 release. - -Signed-off-by: Ed Welch - -* Changing release number in all the docs - -Signed-off-by: Ed Welch ",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 49f786e889916..85ac6c732839a 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,3 +1,255 @@ -+## 1.5.0 (2020-05-20) -+ -+It's been a busy month and a half since 1.4.0 was released, and a lot of new improvements have been added to Loki since! -+ -+Be prepared for some configuration changes that may cause some bumps when upgrading, -+we apologize for this but are always striving to reach the right compromise of code simplicity and user/operating experience. -+ -+In this case we opted to keep a simplified configuration inline with Cortex rather than a more complicated and error prone internal config mapping or difficult to implement support for multiple config names for the same feature. -+ -+This does result in breaking config changes for some configurations, however, these will fail fast and with the [list of diffs](https://cortexmetrics.io/docs/changelog/#config-file-breaking-changes) from the Cortex project should be quick to fix. -+ -+### Important Notes -+ -+**Be prepared for breaking config changes.** Loki 1.5.0 vendors cortex [v1.0.1-0.20200430170006-3462eb63f324](https://github.com/cortexproject/cortex/commit/3462eb63f324c649bbaa122933bc591b710f4e48), -+there were substantial breaking config changes in Cortex 1.0 which standardized config options, and fixed typos. -+ -+**The Loki docker image user has changed to no longer be root** -+ -+Check the [upgrade guide](https://github.com/grafana/loki/blob/master/docs/operations/upgrade.md#150) for more detailed information on these changes. -+ -+### Notable Features and Fixes -+ -+There are quite a few we want to mention listed in order they were merged (mostly) -+ -+* [1837](https://github.com/grafana/loki/pull/1837) **sandeepsukhani**: flush boltdb to object store -+ -+This is perhaps the most exciting feature of 1.5.0, the first steps in removing a dependency on a separate index store! This feature is still very new and experimental, however, we want this to be the future for Loki. Only requiring just an object store. 
-+ -+If you want to test this new feature, and help us find any bugs, check out the [docs](docs/operations/storage/boltdb-shipper.md) to learn more and get started. -+ -+* [2073](https://github.com/grafana/loki/pull/2073) **slim-bean**: Loki: Allow configuring query_store_max_look_back_period when running a filesystem store and boltdb-shipper -+ -+This is even more experimental than the previous feature mentioned however also pretty exciting for Loki users who use the filesystem storage. We can leverage changes made in [1837](https://github.com/grafana/loki/pull/1837) to now allow Loki to run in a clustered mode with individual filesystem stores! -+ -+Please check out the last section in the [filesystem docs](docs/operations/storage/filesystem.md) for more details on how this works and how to use it! -+ -+* [2095](https://github.com/grafana/loki/pull/2095) **cyriltovena**: Adds backtick for the quoted string token lexer. -+ -+This will come as a big win to anyone who is writing complicated reqular expressions in either their Label matchers or Filter Expressions. Starting now you can use the backtick to encapsulate your regex **and not have to do any escaping of special characters!!** -+ -+Examples: -+ -+``` -+{name=""cassandra""} |~ `error=\w+` -+{name!~`mysql-\d+`} -+``` -+ -+* [2055](https://github.com/grafana/loki/pull/2055) **aknuds1**: Chore: Fix spelling of per second in code -+ -+This is technically a breaking change for anyone who wrote code to processes the new statistics output in the query result added in 1.4.0, we apologize to anyone in this situation but if we don't fix this kind of error now it will be there forever. -+And at the same time we didn't feel it was appropriate to make any major api revision changes for such a new feature and simple change. We are always trying to use our best judgement in cases like this. -+ -+* [2031](https://github.com/grafana/loki/pull/2031) **cyriltovena**: Improve protobuf serialization -+ -+Thanks @cyriltovena for another big performance improvement in Loki, this time around protbuf's! -+ -+* [2021](https://github.com/grafana/loki/pull/2021) **slim-bean**: Loki: refactor validation and improve error messages -+* [2012](https://github.com/grafana/loki/pull/2012) **slim-bean**: Loki: Improve logging and add metrics to streams dropped by stream limit -+ -+These two changes standardize the metrics used to report when a tenant hits a limit, now all discarded samples should be reported under `loki_discarded_samples_total` and you no longer need to also reference `cortex_discarded_samples_total`. -+Additionally error messages were improved to help clients take better action when hitting limits. -+ -+* [1970](https://github.com/grafana/loki/pull/1970) **cyriltovena**: Allow to aggregate binary operations. -+ -+Another nice improvement to the query language which allows queries like this to work now: -+ -+``` -+sum by (job) (count_over_time({namespace=""tns""}[5m] |= ""level=error"") / count_over_time({namespace=""tns""}[5m])) -+``` -+ -+* [1713](https://github.com/grafana/loki/pull/1713) **adityacs**: Log error message for invalid checksum -+ -+In the event something went wrong with a stored chunk, rather than fail the query we ignore the chunk and return the rest. -+ -+* [2066](https://github.com/grafana/loki/pull/2066) **slim-bean**: Promtail: metrics stage can also count line bytes -+ -+This is a nice extension to a previous feature which let you add a metric to count log lines per stream, you can now count log bytes per stream. 
-+ -+Check out [this example](docs/clients/promtail/configuration.md#counter) to configure this in your promtail pipelines. -+ -+* [1935](https://github.com/grafana/loki/pull/1935) **cyriltovena**: Support stdin target via flag instead of automatic detection. -+ -+Third times a charm! With 1.4.0 we allowed sending logs directly to promtail via stdin, with 1.4.1 we released a patch for this feature which wasn't detecting stdin correctly on some operating systems. -+Unfortunately after a few more bug reports it seems this change caused some more undesired side effects so we decided to not try to autodetect stdin at all, instead now you must pass the `--stdin` flag if you want Promtail to listen for logs on stdin. -+ -+* [2076](https://github.com/grafana/loki/pull/2076) **cyriltovena**: Allows to pass inlined pipeline stages to the docker driver. -+* [1906](https://github.com/grafana/loki/pull/1906) **cyriltovena**: Add no-file and keep-file log option for docker driver. -+ -+The docker logging driver received a couple very nice updates, it's always been challenging to configure pipeline stages for the docker driver, with the first PR there are now a few easier ways to do this! -+In the second PR we added config options to control keeping any log files on the host when using the docker logging driver, allowing you to run with no disk access if you would like, as well as allowing you to control keeping log files available after container restarts. -+ -+** [1864](https://github.com/grafana/loki/pull/1864) **cyriltovena**: Sign helm package with GPG. -+ -+We now GPG sign helm packages! -+ -+### All Changes -+ -+#### Loki -+ -+* [2097](https://github.com/grafana/loki/pull/2097) **owen-d**: simplifies/updates some of our configuration examples -+* [2095](https://github.com/grafana/loki/pull/2095) **cyriltovena**: Adds backtick for the quoted string token lexer. -+* [2093](https://github.com/grafana/loki/pull/2093) **cyriltovena**: Fixes unit in stats request log. -+* [2088](https://github.com/grafana/loki/pull/2088) **slim-bean**: Loki: allow no encoding/compression on chunks -+* [2078](https://github.com/grafana/loki/pull/2078) **owen-d**: removes yolostring -+* [2073](https://github.com/grafana/loki/pull/2073) **slim-bean**: Loki: Allow configuring query_store_max_look_back_period when running a filesystem store and boltdb-shipper -+* [2064](https://github.com/grafana/loki/pull/2064) **cyriltovena**: Reverse entry iterator pool -+* [2059](https://github.com/grafana/loki/pull/2059) **cyriltovena**: Recover from panic in http and grpc handlers. -+* [2058](https://github.com/grafana/loki/pull/2058) **cyriltovena**: Fix a bug in range vector skipping data. -+* [2055](https://github.com/grafana/loki/pull/2055) **aknuds1**: Chore: Fix spelling of per second in code -+* [2046](https://github.com/grafana/loki/pull/2046) **gouthamve**: Fix bug in logql parsing that leads to crash. -+* [2050](https://github.com/grafana/loki/pull/2050) **aknuds1**: Chore: Correct typo ""per seconds"" -+* [2034](https://github.com/grafana/loki/pull/2034) **sandeepsukhani**: some metrics for measuring performance and failures in boltdb shipper -+* [2031](https://github.com/grafana/loki/pull/2031) **cyriltovena**: Improve protobuf serialization -+* [2030](https://github.com/grafana/loki/pull/2030) **adityacs**: Update loki to cortex master -+* [2023](https://github.com/grafana/loki/pull/2023) **cyriltovena**: Support post requests in the frontend queryrange handler. 
-+* [2021](https://github.com/grafana/loki/pull/2021) **slim-bean**: Loki: refactor validation and improve error messages -+* [2019](https://github.com/grafana/loki/pull/2019) **slim-bean**: make `loki_ingester_memory_streams` Gauge per tenant. -+* [2012](https://github.com/grafana/loki/pull/2012) **slim-bean**: Loki: Improve logging and add metrics to streams dropped by stream limit -+* [2010](https://github.com/grafana/loki/pull/2010) **cyriltovena**: Update lz4 library to latest to ensure deterministic output. -+* [2001](https://github.com/grafana/loki/pull/2001) **sandeepsukhani**: table client for boltdb shipper to enforce retention -+* [1995](https://github.com/grafana/loki/pull/1995) **sandeepsukhani**: make boltdb shipper singleton and some other minor refactoring -+* [1987](https://github.com/grafana/loki/pull/1987) **slim-bean**: Loki: Add a missing method to facade which is called by the metrics storage client in cortex -+* [1982](https://github.com/grafana/loki/pull/1982) **cyriltovena**: Update cortex to latest. -+* [1977](https://github.com/grafana/loki/pull/1977) **cyriltovena**: Ensure trace propagation in our logs. -+* [1976](https://github.com/grafana/loki/pull/1976) **slim-bean**: incorporate some better defaults into table-manager configs -+* [1975](https://github.com/grafana/loki/pull/1975) **slim-bean**: Update cortex vendoring to latest master -+* [1970](https://github.com/grafana/loki/pull/1970) **cyriltovena**: Allow to aggregate binary operations. -+* [1965](https://github.com/grafana/loki/pull/1965) **slim-bean**: Loki: Adds an `interval` paramater to query_range queries allowing a sampling of events to be returned based on the provided interval -+* [1964](https://github.com/grafana/loki/pull/1964) **owen-d**: chunk bounds metric now records 8h range in 1h increments -+* [1963](https://github.com/grafana/loki/pull/1963) **cyriltovena**: Improve the local config to work locally and inside docker. -+* [1961](https://github.com/grafana/loki/pull/1961) **jpmcb**: [Bug] Workaround for broken etcd gomod import -+* [1958](https://github.com/grafana/loki/pull/1958) **owen-d**: chunk lifespan histogram -+* [1956](https://github.com/grafana/loki/pull/1956) **sandeepsukhani**: update cortex to latest master -+* [1953](https://github.com/grafana/loki/pull/1953) **jpmcb**: Go mod: explicit golang.org/x/net replace -+* [1950](https://github.com/grafana/loki/pull/1950) **cyriltovena**: Fixes case handling in regex simplification. -+* [1949](https://github.com/grafana/loki/pull/1949) **SerialVelocity**: [Loki]: Cleanup dockerfile -+* [1946](https://github.com/grafana/loki/pull/1946) **slim-bean**: Loki Update the cut block size counter when creating a memchunk from byte slice -+* [1939](https://github.com/grafana/loki/pull/1939) **owen-d**: adds config validation, similar to cortex -+* [1916](https://github.com/grafana/loki/pull/1916) **cyriltovena**: Add cap_net_bind_service linux capabilities to Loki. -+* [1914](https://github.com/grafana/loki/pull/1914) **owen-d**: only fetches one chunk per series in /series -+* [1875](https://github.com/grafana/loki/pull/1875) **owen-d**: support `match[]` encoding -+* [1869](https://github.com/grafana/loki/pull/1869) **pstibrany**: Update Cortex to latest master -+* [1846](https://github.com/grafana/loki/pull/1846) **owen-d**: Sharding optimizations I: AST mapping -+* [1838](https://github.com/grafana/loki/pull/1838) **cyriltovena**: Move default port for Loki to 3100 everywhere. 
-+* [1837](https://github.com/grafana/loki/pull/1837) **sandeepsukhani**: flush boltdb to object store -+* [1834](https://github.com/grafana/loki/pull/1834) **Mario-Hofstaetter**: Loki/Change local storage directory to /loki/ and fix permissions (#1833) -+* [1819](https://github.com/grafana/loki/pull/1819) **cyriltovena**: Adds a counter for total flushed chunks per reason. -+* [1816](https://github.com/grafana/loki/pull/1816) **sdojjy**: loki can not be started with loki-local-config.yaml -+* [1810](https://github.com/grafana/loki/pull/1810) **cyriltovena**: Optimize empty filter queries. -+* [1809](https://github.com/grafana/loki/pull/1809) **cyriltovena**: Test stats memchunk -+* [1804](https://github.com/grafana/loki/pull/1804) **pstibrany**: Convert Loki modules to services -+* [1799](https://github.com/grafana/loki/pull/1799) **pstibrany**: loki: update Cortex to master -+* [1798](https://github.com/grafana/loki/pull/1798) **adityacs**: Support configurable maximum of the limits parameter -+* [1713](https://github.com/grafana/loki/pull/1713) **adityacs**: Log error message for invalid checksum -+* [1706](https://github.com/grafana/loki/pull/1706) **cyriltovena**: Non-root user docker image for Loki. -+ -+#### Logcli -+* [2027](https://github.com/grafana/loki/pull/2027) **pstibrany**: logcli: Query needs to be stored into url.RawQuery, and not url.Path -+* [2000](https://github.com/grafana/loki/pull/2000) **cyriltovena**: Improve URL building in the logcli to strip trailing /. -+* [1922](https://github.com/grafana/loki/pull/1922) **bavarianbidi**: logcli: org-id/tls-skip-verify set via env var -+* [1861](https://github.com/grafana/loki/pull/1861) **yeya24**: Support series API in logcli -+* [1850](https://github.com/grafana/loki/pull/1850) **chrischdi**: BugFix: Fix logcli client to use OrgID in LiveTail -+* [1814](https://github.com/grafana/loki/pull/1814) **cyriltovena**: Logcli remote storage. -+* [1712](https://github.com/grafana/loki/pull/1712) **rfratto**: clarify logcli commands and output -+ -+#### Promtail -+* [2069](https://github.com/grafana/loki/pull/2069) **slim-bean**: Promtail: log at debug level when nothing matches the specified path for a file target -+* [2066](https://github.com/grafana/loki/pull/2066) **slim-bean**: Promtail: metrics stage can also count line bytes -+* [2049](https://github.com/grafana/loki/pull/2049) **adityacs**: Fix promtail client default values -+* [2075](https://github.com/grafana/loki/pull/2075) **cyriltovena**: Fixes a panic in dry-run when using external labels. -+* [2026](https://github.com/grafana/loki/pull/2026) **adityacs**: Targets not required in promtail config -+* [2004](https://github.com/grafana/loki/pull/2004) **cyriltovena**: Adds config to disable HTTP and GRPC server in Promtail. -+* [1935](https://github.com/grafana/loki/pull/1935) **cyriltovena**: Support stdin target via flag instead of automatic detection. -+* [1920](https://github.com/grafana/loki/pull/1920) **alexanderGalushka**: feat: tms readiness check bypass implementation -+* [1894](https://github.com/grafana/loki/pull/1894) **cyriltovena**: Fixes possible panic in json pipeline stage. 
-+* [1865](https://github.com/grafana/loki/pull/1865) **adityacs**: Fix flaky promtail test -+* [1815](https://github.com/grafana/loki/pull/1815) **adityacs**: Log error message when source does not exist in extracted values -+* [1627](https://github.com/grafana/loki/pull/1627) **rfratto**: Proposal: Promtail Push API -+ -+#### Docker Driver -+* [2076](https://github.com/grafana/loki/pull/2076) **cyriltovena**: Allows to pass inlined pipeline stages to the docker driver. -+* [2054](https://github.com/grafana/loki/pull/2054) **bkmit**: Docker driver: Allow to provision external pipeline files to plugin -+* [1906](https://github.com/grafana/loki/pull/1906) **cyriltovena**: Add no-file and keep-file log option for docker driver. -+* [1903](https://github.com/grafana/loki/pull/1903) **cyriltovena**: Log docker driver config map. -+ -+#### FluentD -+* [2074](https://github.com/grafana/loki/pull/2074) **osela**: fluentd plugin: support placeholders in tenant field -+* [2006](https://github.com/grafana/loki/pull/2006) **Skeen**: fluent-plugin-loki: Restructuring and CI -+* [1909](https://github.com/grafana/loki/pull/1909) **jgehrcke**: fluentd loki plugin README: add note about labels -+* [1853](https://github.com/grafana/loki/pull/1853) **wardbekker**: bump gem version -+* [1811](https://github.com/grafana/loki/pull/1811) **JamesJJ**: Error handling: Show data stream at ""debug"" level, not ""warn"" -+ -+#### Fluent Bit -+* [2040](https://github.com/grafana/loki/pull/2040) **avii-ridge**: Add extraOutputs variable to support multiple outputs for fluent-bit -+* [1915](https://github.com/grafana/loki/pull/1915) **DirtyCajunRice**: Fix fluent-bit metrics -+* [1890](https://github.com/grafana/loki/pull/1890) **dottedmag**: fluentbit: JSON encoding: avoid base64 encoding of []byte inside other slices -+* [1791](https://github.com/grafana/loki/pull/1791) **cyriltovena**: Improve fluentbit logfmt. -+ -+#### Ksonnet -+* [1980](https://github.com/grafana/loki/pull/1980) **cyriltovena**: Log slow query from the frontend by default in ksonnet. -+ -+##### Mixins -+* [2080](https://github.com/grafana/loki/pull/2080) **beorn7**: mixin: Accept suffixes to pod name in instance labels -+* [2044](https://github.com/grafana/loki/pull/2044) **slim-bean**: Dashboards: fixes the cpu usage graphs -+* [2043](https://github.com/grafana/loki/pull/2043) **joe-elliott**: Swapped to container restarts over terminated reasons -+* [2041](https://github.com/grafana/loki/pull/2041) **slim-bean**: Dashboard: Loki Operational improvements -+* [1934](https://github.com/grafana/loki/pull/1934) **tomwilkie**: Put loki-mixin and promtail-mixin dashboards in a folder. -+* [1913](https://github.com/grafana/loki/pull/1913) **tomwilkie**: s/dashboards/grafanaDashboards. 
-+ -+#### Helm -+* [2038](https://github.com/grafana/loki/pull/2038) **oke-py**: Docs: update Loki Helm Chart document to support Helm 3 -+* [2015](https://github.com/grafana/loki/pull/2015) **etashsingh**: Change image tag from 1.4.1 to 1.4.0 in Helm chart -+* [1981](https://github.com/grafana/loki/pull/1981) **sshah90**: added extraCommandlineArgs in values file -+* [1967](https://github.com/grafana/loki/pull/1967) **rdxmb**: helm chart: add missing line feed -+* [1898](https://github.com/grafana/loki/pull/1898) **stefanandres**: [helm loki/promtail] make UpdateStrategy configurable -+* [1871](https://github.com/grafana/loki/pull/1871) **stefanandres**: [helm loki/promtail] Add systemd-journald example with extraMount, extraVolumeMount -+* [1864](https://github.com/grafana/loki/pull/1864) **cyriltovena**: Sign helm package with GPG. -+* [1825](https://github.com/grafana/loki/pull/1825) **polar3130**: Helm/loki-stack: refresh default grafana.image.tag to 6.7.0 -+* [1817](https://github.com/grafana/loki/pull/1817) **bclermont**: Helm chart: Prevent prometheus to scrape both services -+ -+#### Loki Canary -+* [1891](https://github.com/grafana/loki/pull/1891) **joe-elliott**: Addition of a `/suspend` endpoint to Loki Canary -+ -+#### Docs -+* [2056](https://github.com/grafana/loki/pull/2056) **cyriltovena**: Update api.md -+* [2014](https://github.com/grafana/loki/pull/2014) **jsoref**: Spelling -+* [1999](https://github.com/grafana/loki/pull/1999) **oddlittlebird**: Docs: Added labels content -+* [1974](https://github.com/grafana/loki/pull/1974) **rfratto**: fix stores for chunk and index in documentation for period_config -+* [1966](https://github.com/grafana/loki/pull/1966) **oddlittlebird**: Docs: Update docker.md -+* [1951](https://github.com/grafana/loki/pull/1951) **cstyan**: Move build from source instructions to root readme. -+* [1945](https://github.com/grafana/loki/pull/1945) **FlorianLudwig**: docs: version pin the docker image in docker-compose -+* [1925](https://github.com/grafana/loki/pull/1925) **wardbekker**: Clarified that the api push path needs to be specified. -+* [1905](https://github.com/grafana/loki/pull/1905) **sshah90**: updating typo for end time parameter in api docs -+* [1888](https://github.com/grafana/loki/pull/1888) **slim-bean**: docs: cleaning up the comments for the cache_config, default_validity option -+* [1887](https://github.com/grafana/loki/pull/1887) **slim-bean**: docs: Adding a config change in release 1.4 upgrade doc, updating readme with new doc links -+* [1881](https://github.com/grafana/loki/pull/1881) **cyriltovena**: Add precision about the range notation for LogQL. -+* [1879](https://github.com/grafana/loki/pull/1879) **slim-bean**: docs: update promtail docs for backoff -+* [1873](https://github.com/grafana/loki/pull/1873) **owen-d**: documents frontend worker -+* [1870](https://github.com/grafana/loki/pull/1870) **ushuz**: Docs: Keep plugin install command example in one line -+* [1856](https://github.com/grafana/loki/pull/1856) **slim-bean**: docs: tweak the doc section of the readme a little -+* [1852](https://github.com/grafana/loki/pull/1852) **slim-bean**: docs: clean up schema recommendations -+* [1843](https://github.com/grafana/loki/pull/1843) **vishesh92**: Docs: Update configuration docs for redis -+ -+#### Build -+* [2042](https://github.com/grafana/loki/pull/2042) **rfratto**: Fix drone -+* [2009](https://github.com/grafana/loki/pull/2009) **cyriltovena**: Adds :delegated flags to speed up build experience on MacOS. 
-+* [1942](https://github.com/grafana/loki/pull/1942) **owen-d**: delete tag script filters by prefix instead of substring -+* [1918](https://github.com/grafana/loki/pull/1918) **slim-bean**: build: This Dockerfile is a remnant from a long time ago, not needed. -+* [1911](https://github.com/grafana/loki/pull/1911) **slim-bean**: build: push images for `k` branches -+* [1849](https://github.com/grafana/loki/pull/1849) **cyriltovena**: Pin helm version in circle-ci helm testing workflow. -+ -+ - ## 1.4.1 (2020-04-06) - - We realized after the release last week that piping data into promtail was not working on Linux or Windows, this should fix this issue for both platforms: -diff --git a/README.md b/README.md -index 2779ada7f55a0..c713ee8c2f3ec 100644 ---- a/README.md -+++ b/README.md -@@ -29,11 +29,9 @@ Loki differs from Prometheus by focusing on logs instead of metrics, and deliver - - ## Getting started - --* [Installing Loki](https://github.com/grafana/loki/tree/v1.4.1/docs/installation/README.md) --* [Installing --Promtail](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/promtail/installation.md) --* [Getting --Started Guide](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/README.md) -+* [Installing Loki](https://github.com/grafana/loki/tree/v1.5.0/docs/installation/README.md) -+* [Installing Promtail](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/promtail/installation.md) -+* [Getting Started Guide](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/README.md) - - ## Upgrading - -@@ -42,6 +40,7 @@ Started Guide](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/ - ### Documentation - - * [master](./docs/README.md) -+* [v1.5.0](https://github.com/grafana/loki/tree/v1.5.0/docs/README.md) - * [v1.4.1](https://github.com/grafana/loki/tree/v1.4.1/docs/README.md) - * [v1.4.0](https://github.com/grafana/loki/tree/v1.4.0/docs/README.md) - * [v1.3.0](https://github.com/grafana/loki/tree/v1.3.0/docs/README.md) -@@ -49,18 +48,18 @@ Started Guide](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/ - * [v1.1.0](https://github.com/grafana/loki/tree/v1.1.0/docs/README.md) - * [v1.0.0](https://github.com/grafana/loki/tree/v1.0.0/docs/README.md) - --Commonly used sections (from the latest release v1.4.1): -+Commonly used sections (from the latest release v1.5.0): - --- [API documentation](https://github.com/grafana/loki/tree/v1.4.1/docs/api.md) for alternative ways of getting logs into Loki. -+- [API documentation](https://github.com/grafana/loki/tree/v1.5.0/docs/api.md) for alternative ways of getting logs into Loki. - - [Labels](https://github.com/grafana/loki/blob/master/docs/getting-started/labels.md) --- [Operations](https://github.com/grafana/loki/tree/v1.4.1/docs/operations) for important aspects of running Loki. --- [Promtail](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/promtail) is an agent which can tail your log files and push them to Loki. --- [Pipelines](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/promtail/pipelines.md) for detailed log processing pipeline documentation --- [Docker Logging Driver](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/docker-driver) is a docker plugin to send logs directly to Loki from Docker containers. --- [LogCLI](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/logcli.md) on how to query your logs without Grafana. 
--- [Loki Canary](https://github.com/grafana/loki/tree/v1.4.1/docs/operations/loki-canary.md) for monitoring your Loki installation for missing logs. --- [Troubleshooting](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/troubleshooting.md) for help around frequent error messages. --- [Loki in Grafana](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/grafana.md) for how to set up a Loki datasource in Grafana and query your logs. -+- [Operations](https://github.com/grafana/loki/tree/v1.5.0/docs/operations) for important aspects of running Loki. -+- [Promtail](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/promtail) is an agent which can tail your log files and push them to Loki. -+- [Pipelines](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/promtail/pipelines.md) for detailed log processing pipeline documentation -+- [Docker Logging Driver](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/docker-driver) is a docker plugin to send logs directly to Loki from Docker containers. -+- [LogCLI](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/logcli.md) on how to query your logs without Grafana. -+- [Loki Canary](https://github.com/grafana/loki/tree/v1.5.0/docs/operations/loki-canary.md) for monitoring your Loki installation for missing logs. -+- [Troubleshooting](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/troubleshooting.md) for help around frequent error messages. -+- [Loki in Grafana](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/grafana.md) for how to set up a Loki datasource in Grafana and query your logs. - - ## Getting Help - -diff --git a/docs/clients/promtail/installation.md b/docs/clients/promtail/installation.md -index 7dae8efd31659..191e3efa870b7 100644 ---- a/docs/clients/promtail/installation.md -+++ b/docs/clients/promtail/installation.md -@@ -12,7 +12,7 @@ Every release includes binaries for Promtail which can be found on the - - ```bash - # modify tag to most recent version --$ docker pull grafana/promtail:1.4.1 -+$ docker pull grafana/promtail:1.5.0 - ``` - - ## Helm -diff --git a/docs/installation/docker.md b/docs/installation/docker.md -index 82be1cec37273..850ae4b3eedd5 100644 ---- a/docs/installation/docker.md -+++ b/docs/installation/docker.md -@@ -15,10 +15,10 @@ For production, we recommend Tanka or Helm. - Copy and paste the commands below into your command line. - - ```bash --wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/loki/loki-local-config.yaml -O loki-config.yaml --docker run -v $(pwd):/mnt/config -p 3100:3100 grafana/loki:1.4.1 -config.file=/mnt/config/loki-config.yaml --wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml --docker run -v $(pwd):/mnt/config -v /var/log:/var/log grafana/promtail:1.4.1 -config.file=/mnt/config/promtail-config.yaml -+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/loki/loki-local-config.yaml -O loki-config.yaml -+docker run -v $(pwd):/mnt/config -p 3100:3100 grafana/loki:1.5.0 -config.file=/mnt/config/loki-config.yaml -+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml -+docker run -v $(pwd):/mnt/config -v /var/log:/var/log grafana/promtail:1.5.0 -config.file=/mnt/config/promtail-config.yaml - ``` - - When finished, loki-config.yaml and promtail-config.yaml are downloaded in the directory you chose. 
Docker containers are running Loki and Promtail using those config files. -@@ -31,10 +31,10 @@ Copy and paste the commands below into your terminal. Note that you will need to - - ```bash - cd """" --wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/loki/loki-local-config.yaml -O loki-config.yaml --docker run -v :/mnt/config -p 3100:3100 grafana/loki:1.4.1 --config.file=/mnt/config/loki-config.yaml --wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml --docker run -v :/mnt/config -v /var/log:/var/log grafana/promtail:1.4.1 --config.file=/mnt/config/promtail-config.yaml -+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/loki/loki-local-config.yaml -O loki-config.yaml -+docker run -v :/mnt/config -p 3100:3100 grafana/loki:1.5.0 --config.file=/mnt/config/loki-config.yaml -+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml -+docker run -v :/mnt/config -v /var/log:/var/log grafana/promtail:1.5.0 --config.file=/mnt/config/promtail-config.yaml - ``` - - When finished, loki-config.yaml and promtail-config.yaml are downloaded in the directory you chose. Docker containers are running Loki and Promtail using those config files. -@@ -44,6 +44,6 @@ Navigate to http://localhost:3100/metrics to view the output. - ## Install with Docker Compose - - ```bash --$ wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/production/docker-compose.yaml -O docker-compose.yaml -+$ wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/production/docker-compose.yaml -O docker-compose.yaml - $ docker-compose -f docker-compose.yaml up - ``` -diff --git a/docs/operations/loki-canary.md b/docs/operations/loki-canary.md -index 7aa31373b6ea1..5709b2f756d77 100644 ---- a/docs/operations/loki-canary.md -+++ b/docs/operations/loki-canary.md -@@ -67,7 +67,7 @@ Loki Canary is also provided as a Docker container image: - - ```bash - # change tag to the most recent release --$ docker pull grafana/loki-canary:1.4.1 -+$ docker pull grafana/loki-canary:1.5.0 - ``` - - ### Kubernetes -diff --git a/docs/operations/upgrade.md b/docs/operations/upgrade.md -index 9cf4deb10a218..bbd142fd41e05 100644 ---- a/docs/operations/upgrade.md -+++ b/docs/operations/upgrade.md -@@ -8,6 +8,10 @@ On this page we will document any upgrade issues/gotchas/considerations we are a - - ## 1.5.0 - -+Note: The required upgrade path outlined for version 1.4.0 below is still true for moving to 1.5.0 from any release older than 1.4.0 (e.g. 1.3.0->1.5.0 needs to also look at the 1.4.0 upgrade requirements). -+ -+### Breaking config changes! -+ - Loki 1.5.0 vendors Cortex v1.0.0 (congratulations!), which has a [massive list of changes](https://cortexmetrics.io/docs/changelog/#1-0-0-2020-04-02). - - While changes in the command line flags affect Loki as well, we usually recommend people to use configuration file instead. -@@ -16,6 +20,92 @@ Cortex has done lot of cleanup in the configuration files, and you are strongly - - Following fields were removed from YAML configuration completely: `claim_on_rollout` (always true), `normalise_tokens` (always true). 
- -+#### Test Your Config -+ -+To see if your config needs to change, one way to quickly test is to download a 1.5.0 (or newer) binary from the [release page](https://github.com/grafana/loki/releases/tag/v1.5.0) -+ -+Then run the binary providing your config file `./loki-linux-amd64 -config.file=myconfig.yaml` -+ -+If there are configs which are no longer valid you will see errors immediately: -+ -+```shell -+./loki-linux-amd64 -config.file=loki-local-config.yaml -+failed parsing config: loki-local-config.yaml: yaml: unmarshal errors: -+ line 35: field dynamodbconfig not found in type aws.StorageConfig -+``` -+ -+Referencing the [list of diffs](https://cortexmetrics.io/docs/changelog/#config-file-breaking-changes) I can see this config changed: -+ -+```diff -+- dynamodbconfig: -++ dynamodb: -+``` -+ -+Also several other AWS related configs changed and would need to udpate those as well. -+ -+ -+### Loki Docker Image User and File Location Changes -+ -+To improve security concerns, in 1.5.0 the Docker container no longer runs the loki process as `root` and instead the process runs as user `loki` with UID `10001` and GID `10001` -+ -+This may affect people in a couple ways: -+ -+#### Loki Port -+ -+If you are running Loki with a config that opens a port number above 1000 (which is the default, 3100 for HTTP and 9095 for GRPC) everything should work fine in regards to ports. -+ -+If you are running Loki with a config that opens a port number less than 1000 Linux normally requires root permissions to do this, HOWEVER in the Docker container we run `setcap cap_net_bind_service=+ep /usr/bin/loki` -+ -+This capability lets the loki process bind to a port less than 1000 when run as a non root user. -+ -+Not every environment will allow this capability however, it's possible to restrict this capability in linux. If this restriction is in place, you will be forced to run Loki with a config that has HTTP and GRPC ports above 1000. -+ -+#### Filesystem -+ -+**Please note the location Loki is looking for files with the provided config in the docker image has changed** -+ -+In 1.4.0 and earlier the included config file in the docker container was using directories: -+ -+``` -+/tmp/loki/index -+/tmp/loki/chunks -+``` -+ -+In 1.5.0 this has changed: -+ -+``` -+/loki/index -+/loki/chunks -+``` -+ -+This will mostly affect anyone using docker-compose or docker to run Loki and are specifying a volume to persist storage. 
-+ -+**There are two concerns to track here, one is the correct ownership of the files and the other is making sure your mounts updated to the new location.** -+ -+One possible upgrade path would look like this: -+ -+If I were running Loki with this command `docker run -d --name=loki --mount source=loki-data,target=/tmp/loki -p 3100:3100 grafana/loki:1.4.0` -+ -+This would mount a docker volume named `loki-data` to the `/temp/loki` folder which is where Loki will persist the `index` and `chunks` folder in 1.4.0 -+ -+To move to 1.5.0 I can do the following (please note that your container names and paths and volumes etc may be different): -+ -+``` -+docker stop loki -+docker rm loki -+docker run --rm --name=""loki-perm"" -it --mount source=loki-data,target=/mnt ubuntu /bin/bash -+cd /mnt -+chown -R 10001:10001 ./* -+exit -+docker run -d --name=loki --mount source=loki-data,target=/loki -p 3100:3100 grafana/loki:1.5.0 -+``` -+ -+Notice the change in the `target=/loki` for 1.5.0 to the new data directory location specified in the [included Loki config file](../../cmd/loki/loki-docker-config.yaml). -+ -+The intermediate step of using an ubuntu image to change the ownership of the Loki files to the new user might not be necessary if you can easily access these files to run the `chown` command directly. -+That is if you have access to `/var/lib/docker/volumes` or if you mounted to a different local filesystem directory, you can change the ownership directly without using a container. -+ -+ - ## 1.4.0 - - Loki 1.4.0 vendors Cortex v0.7.0-rc.0 which contains [several breaking config changes](https://github.com/cortexproject/cortex/blob/v0.7.0-rc.0/CHANGELOG.md). -@@ -84,4 +174,3 @@ If you attempt to add a v1.4.0 ingester to a ring created by Loki v1.2.0 or olde - This will result in distributors failing to write and a general ingestion failure for the system. - - If this happens to you, you will want to rollback your deployment immediately. You need to remove the v1.4.0 ingester from the ring ASAP, this should allow the existing ingesters to re-insert their tokens. You will also want to remove any v1.4.0 distributors as they will not understand the old ring either and will fail to send traffic. -- -diff --git a/production/docker-compose.yaml b/production/docker-compose.yaml -index 80edcd8651e52..b31014645673e 100644 ---- a/production/docker-compose.yaml -+++ b/production/docker-compose.yaml -@@ -5,7 +5,7 @@ networks: - - services: - loki: -- image: grafana/loki:1.4.1 -+ image: grafana/loki:1.5.0 - ports: - - ""3100:3100"" - command: -config.file=/etc/loki/local-config.yaml -@@ -13,7 +13,7 @@ services: - - loki - - promtail: -- image: grafana/promtail:1.4.1 -+ image: grafana/promtail:1.5.0 - volumes: - - /var/log:/var/log - command: -config.file=/etc/promtail/docker-config.yaml",unknown,"Prep 1.5.0 release (#2098) - -* Updating the Changelog and Upgrade guide for 1.5.0 release. 
- -Signed-off-by: Ed Welch - -* Changing release number in all the docs - -Signed-off-by: Ed Welch " -a2060efd6332464d3239ebce882e8d4a1fcd61e3,2019-09-26 18:33:56,polar3130,"Helm: Remove default value of storageClassName in loki/loki helm chart (#1058) - -* delete default name of storageClassName ""default"" - -* bump loki stack",False,"diff --git a/production/helm/loki-stack/Chart.yaml b/production/helm/loki-stack/Chart.yaml -index 687f2668abcde..ed11e9e5f2fca 100644 ---- a/production/helm/loki-stack/Chart.yaml -+++ b/production/helm/loki-stack/Chart.yaml -@@ -1,5 +1,5 @@ - name: loki-stack --version: 0.16.3 -+version: 0.16.4 - appVersion: v0.3.0 - kubeVersion: ""^1.10.0-0"" - description: ""Loki: like Prometheus, but for logs."" -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index f0de1c0f9b7cb..3936cc566ccaa 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -1,5 +1,5 @@ - name: loki --version: 0.14.2 -+version: 0.14.3 - appVersion: v0.3.0 - kubeVersion: ""^1.10.0-0"" - description: ""Loki: like Prometheus, but for logs."" -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index 826e5ca793fbb..c82c68e794214 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -96,7 +96,6 @@ persistence: - accessModes: - - ReadWriteOnce - size: 10Gi -- storageClassName: default - annotations: {} - # subPath: """" - # existingClaim:",Helm,"Remove default value of storageClassName in loki/loki helm chart (#1058) - -* delete default name of storageClassName ""default"" - -* bump loki stack" -94d1550e9cb60d35a175d1e186acdb70670d57bf,2022-01-04 14:18:39,Karen Miller,Docs: improve Promtail installation prose (#5017),False,"diff --git a/docs/sources/clients/promtail/installation.md b/docs/sources/clients/promtail/installation.md -index 380c867d92989..fb0fde141e68c 100644 ---- a/docs/sources/clients/promtail/installation.md -+++ b/docs/sources/clients/promtail/installation.md -@@ -3,8 +3,8 @@ title: Installation - --- - # Install Promtail - --Promtail is distributed as a [binary](#binary), [Docker container](#docker), and --[Helm chart](#helm). -+Promtail is distributed as a binary, in a Docker container, -+or there is a Helm chart to install it in a Kubernetes cluster. - - ## Binary - -@@ -20,8 +20,8 @@ docker pull grafana/promtail:2.0.0 - - ## Helm - --Make sure that Helm is --[installed](https://helm.sh/docs/using_helm/#installing-helm). -+Make sure that Helm is installed. -+See [Installing Helm](https://helm.sh/docs/intro/install/). - Then you can add Grafana's chart repository to Helm: - - ```bash -@@ -46,7 +46,7 @@ $ helm upgrade --install promtail grafana/promtail --set ""loki.serviceName=loki"" - - A `DaemonSet` will deploy Promtail on every node within a Kubernetes cluster. - --The DaemonSet deployment is great to collect the logs of all containers within a -+The DaemonSet deployment works well at collecting the logs of all containers within a - cluster. It's the best solution for a single-tenant model. 
- - ```yaml",Docs,improve Promtail installation prose (#5017) -4b4655300ccbd992816ac4013dbd79aef20bcd00,2024-11-15 19:22:36,renovate[bot],"fix(deps): update module golang.org/x/time to v0.8.0 (#14930) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index e190c8ade0070..dc03337ac00e1 100644 ---- a/go.mod -+++ b/go.mod -@@ -102,7 +102,7 @@ require ( - golang.org/x/net v0.30.0 - golang.org/x/sync v0.9.0 - golang.org/x/sys v0.27.0 -- golang.org/x/time v0.7.0 -+ golang.org/x/time v0.8.0 - google.golang.org/api v0.203.0 - google.golang.org/grpc v1.68.0 - gopkg.in/alecthomas/kingpin.v2 v2.2.6 -diff --git a/go.sum b/go.sum -index d83f44d81b152..0d498e901e62b 100644 ---- a/go.sum -+++ b/go.sum -@@ -3288,8 +3288,8 @@ golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxb - golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= - golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= - golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= --golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= --golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= - golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= - golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= - golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 244ed31f53211..0789e209ff2cf 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -1931,7 +1931,7 @@ golang.org/x/text/secure/bidirule - golang.org/x/text/transform - golang.org/x/text/unicode/bidi - golang.org/x/text/unicode/norm --# golang.org/x/time v0.7.0 -+# golang.org/x/time v0.8.0 - ## explicit; go 1.18 - golang.org/x/time/rate - # golang.org/x/tools v0.23.0",fix,"update module golang.org/x/time to v0.8.0 (#14930) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -0ade92aef0658db021f4f52232925c99ff00824d,2018-11-19 23:39:59,Goutham Veeramachaneni,"Initial Mixin (#25) - -Signed-off-by: Goutham Veeramachaneni ",False,"diff --git a/.gitignore b/.gitignore -index 4ff530d89f31f..ec955370bddc9 100644 ---- a/.gitignore -+++ b/.gitignore -@@ -10,3 +10,4 @@ cmd/querier/querier - cmd/promtail/promtail - *.output - /images/ -+mixin/vendor/ -diff --git a/mixin/alerts.libsonnet b/mixin/alerts.libsonnet -new file mode 100644 -index 0000000000000..a43ac122415b0 ---- /dev/null -+++ b/mixin/alerts.libsonnet -@@ -0,0 +1,120 @@ -+{ -+ prometheusAlerts+:: { -+ groups+: [ -+ { -+ name: 'logish_alerts', -+ rules: [ -+ { -+ alert: 'LogishRequestErrors', -+ expr: ||| -+ 100 * sum(rate(logish_request_duration_seconds_count{status_code=~""5..""}[1m])) by (namespace, job, route) -+ / -+ sum(rate(logish_request_duration_seconds_count[1m])) by (namespace, job, route) -+ > 10 -+ |||, -+ 'for': '15m', -+ labels: { -+ severity: 'critical', -+ }, -+ annotations: { -+ message: ||| -+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}% errors. 
-+ |||, -+ }, -+ }, -+ { -+ alert: 'LogishRequestLatency', -+ expr: ||| -+ namespace_job_route:logish_request_duration_seconds:99quantile > 1 -+ |||, -+ 'for': '15m', -+ labels: { -+ severity: 'critical', -+ }, -+ annotations: { -+ message: ||| -+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}s 99th percentile latency. -+ |||, -+ }, -+ }, -+ ], -+ }, -+ { -+ name: 'logish_frontend_alerts', -+ rules: [ -+ { -+ alert: 'FrontendRequestErrors', -+ expr: ||| -+ 100 * sum(rate(cortex_gw_request_duration_seconds_count{status_code=~""5..""}[1m])) by (namespace, job, route) -+ / -+ sum(rate(cortex_gw_request_duration_seconds_count[1m])) by (namespace, job, route) -+ > 10 -+ |||, -+ 'for': '15m', -+ labels: { -+ severity: 'critical', -+ }, -+ annotations: { -+ message: ||| -+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}% errors. -+ |||, -+ }, -+ }, -+ { -+ alert: 'FrontendRequestLatency', -+ expr: ||| -+ namespace_job_route:cortex_gw_request_duration_seconds:99quantile > 1 -+ |||, -+ 'for': '15m', -+ labels: { -+ severity: 'critical', -+ }, -+ annotations: { -+ message: ||| -+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}s 99th percentile latency. -+ |||, -+ }, -+ }, -+ ], -+ }, -+ { -+ name: 'promtail_alerts', -+ rules: [ -+ { -+ alert: 'PromtailRequestsErrors', -+ expr: ||| -+ 100 * sum(rate(promtail_request_duration_seconds_count{status_code=~""5..|failed""}[1m])) by (namespace, job, route, instance) -+ / -+ sum(rate(promtail_request_duration_seconds_count[1m])) by (namespace, job, route, instance) -+ > 10 -+ |||, -+ 'for': '15m', -+ labels: { -+ severity: 'critical', -+ }, -+ annotations: { -+ message: ||| -+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}% errors. -+ |||, -+ }, -+ }, -+ { -+ alert: 'PromtailRequestLatency', -+ expr: ||| -+ job_status_code:promtail_request_duration_seconds:99quantile > 1 -+ |||, -+ 'for': '15m', -+ labels: { -+ severity: 'critical', -+ }, -+ annotations: { -+ message: ||| -+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}s 99th percentile latency. 
-+ |||, -+ }, -+ }, -+ ], -+ }, -+ ], -+ }, -+} -\ No newline at end of file -diff --git a/mixin/dashboards.libsonnet b/mixin/dashboards.libsonnet -new file mode 100644 -index 0000000000000..fe20a63e9fcd4 ---- /dev/null -+++ b/mixin/dashboards.libsonnet -@@ -0,0 +1,164 @@ -+local g = import 'grafana-builder/grafana.libsonnet'; -+ -+{ -+ dashboards+: { -+ 'logish-writes.json': -+ g.dashboard('Logish / Writes') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace') -+ .addRow( -+ g.row('Frontend (cortex_gw)') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/cortex-gw"", route=""cortex-write""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('cortex_gw_request_duration_seconds', [g.selector.eq('job', '$namespace/cortex-gw'), g.selector.eq('route', 'cortex-write')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ) -+ .addRow( -+ g.row('Distributor') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/distributor"", route=""api_prom_push""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/distributor'), g.selector.eq('route', 'api_prom_push')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ) -+ .addRow( -+ g.row('Ingester') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route=""/logproto.Pusher/Push""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.eq('route', '/logproto.Pusher/Push')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ), -+ -+ 'logish-reads.json': -+ g.dashboard('logish / Reads') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace') -+ .addRow( -+ g.row('Frontend (cortex_gw)') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/cortex-gw"", route=""cortex-read""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('cortex_gw_request_duration_seconds', [g.selector.eq('job', '$namespace/cortex-gw'), g.selector.eq('route', 'cortex-read')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ) -+ .addRow( -+ g.row('Querier') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/querier""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/querier')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ) -+ .addRow( -+ g.row('Ingester') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route!~""/logproto.Pusher/Push|metrics|ready|traces""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.nre('route', 
'/logproto.Pusher/Push|metrics|ready')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ), -+ -+ -+ 'logish-chunks.json': -+ g.dashboard('Logish / Chunks') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace') -+ .addRow( -+ g.row('Active Series / Chunks') -+ .addPanel( -+ g.panel('Series') + -+ g.queryPanel('sum(logish_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""})', 'series'), -+ ) -+ .addPanel( -+ g.panel('Chunks per series') + -+ g.queryPanel('sum(logish_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""}) / sum(logish_ingester_memory_series{job=""$namespace/ingester""})', 'chunks'), -+ ) -+ ) -+ .addRow( -+ g.row('Flush Stats') -+ .addPanel( -+ g.panel('Utilization') + -+ g.latencyPanel('logish_ingester_chunk_utilization', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') + -+ { yaxes: g.yaxes('percentunit') }, -+ ) -+ .addPanel( -+ g.panel('Age') + -+ g.latencyPanel('logish_ingester_chunk_age_seconds', '{cluster=""$cluster"", job=""$namespace/ingester""}'), -+ ), -+ ) -+ .addRow( -+ g.row('Flush Stats') -+ .addPanel( -+ g.panel('Size') + -+ g.latencyPanel('logish_ingester_chunk_length', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') + -+ { yaxes: g.yaxes('short') }, -+ ) -+ .addPanel( -+ g.panel('Entries') + -+ g.queryPanel('sum(rate(logish_chunk_store_index_entries_per_chunk_sum{cluster=""$cluster"", job=""$namespace/ingester""}[5m])) / sum(rate(logish_chunk_store_index_entries_per_chunk_count{cluster=""$cluster"", job=""$namespace/ingester""}[5m]))', 'entries'), -+ ), -+ ) -+ .addRow( -+ g.row('Flush Stats') -+ .addPanel( -+ g.panel('Queue Length') + -+ g.queryPanel('logish_ingester_flush_queue_length{cluster=""$cluster"", job=""$namespace/ingester""}', '{{instance}}'), -+ ) -+ .addPanel( -+ g.panel('Flush Rate') + -+ g.qpsPanel('logish_ingester_chunk_age_seconds_count{cluster=""$cluster"", job=""$namespace/ingester""}'), -+ ), -+ ), -+ -+ 'logish-frontend.json': -+ g.dashboard('Logish / Frontend') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace') -+ .addRow( -+ g.row('logish Reqs (cortex_gw)') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/cortex-gw""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('cortex_gw_request_duration_seconds', [g.selector.eq('job', '$namespace/cortex-gw')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ), -+ 'promtail.json': -+ g.dashboard('Logish / Promtail') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace') -+ .addRow( -+ g.row('promtail Reqs') -+ .addPanel( -+ g.panel('QPS') + -+ g.qpsPanel('promtail_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/promtail""}') -+ ) -+ .addPanel( -+ g.panel('Latency') + -+ g.latencyRecordingRulePanel('promtail_request_duration_seconds', [g.selector.eq('job', '$namespace/promtail')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ ) -+ ) -+ }, -+} -\ No newline at end of file -diff --git a/mixin/jsonnetfile.json b/mixin/jsonnetfile.json -new file mode 100644 -index 
0000000000000..3a5d8ad2223e4 ---- /dev/null -+++ b/mixin/jsonnetfile.json -@@ -0,0 +1,14 @@ -+{ -+ ""dependencies"": [ -+ { -+ ""name"": ""grafana-builder"", -+ ""source"": { -+ ""git"": { -+ ""remote"": ""https://github.com/kausalco/public"", -+ ""subdir"": ""grafana-builder"" -+ } -+ }, -+ ""version"": ""master"" -+ } -+ ] -+} -\ No newline at end of file -diff --git a/mixin/jsonnetfile.lock.json b/mixin/jsonnetfile.lock.json -new file mode 100644 -index 0000000000000..933c3204b739c ---- /dev/null -+++ b/mixin/jsonnetfile.lock.json -@@ -0,0 +1,14 @@ -+{ -+ ""dependencies"": [ -+ { -+ ""name"": ""grafana-builder"", -+ ""source"": { -+ ""git"": { -+ ""remote"": ""https://github.com/kausalco/public"", -+ ""subdir"": ""grafana-builder"" -+ } -+ }, -+ ""version"": ""cab274f882aae97ad6add33590a3b149e6f8eeac"" -+ } -+ ] -+} -\ No newline at end of file -diff --git a/mixin/mixin.libsonnet b/mixin/mixin.libsonnet -new file mode 100644 -index 0000000000000..a684acd630f0f ---- /dev/null -+++ b/mixin/mixin.libsonnet -@@ -0,0 +1,3 @@ -+(import 'dashboards.libsonnet') + -+(import 'alerts.libsonnet') + -+(import 'recording_rules.libsonnet') -\ No newline at end of file -diff --git a/mixin/recording_rules.libsonnet b/mixin/recording_rules.libsonnet -new file mode 100644 -index 0000000000000..18404e2fe9749 ---- /dev/null -+++ b/mixin/recording_rules.libsonnet -@@ -0,0 +1,43 @@ -+local histogramRules(metric, labels) = -+ local vars = { -+ metric: metric, -+ labels_underscore: std.join('_', labels), -+ labels_comma: std.join(', ', labels), -+ }; -+ [ -+ { -+ record: '%(labels_underscore)s:%(metric)s:99quantile' % vars, -+ expr: 'histogram_quantile(0.99, sum(rate(%(metric)s_bucket[5m])) by (le, %(labels_comma)s))' % vars, -+ }, -+ { -+ record: '%(labels_underscore)s:%(metric)s:50quantile' % vars, -+ expr: 'histogram_quantile(0.50, sum(rate(%(metric)s_bucket[5m])) by (le, %(labels_comma)s))' % vars, -+ }, -+ { -+ record: '%(labels_underscore)s:%(metric)s:avg' % vars, -+ expr: 'sum(rate(%(metric)s_sum[5m])) by (%(labels_comma)s) / sum(rate(%(metric)s_count[5m])) by (%(labels_comma)s)' % vars, -+ }, -+ ]; -+ -+{ -+ prometheus_rules+:: { -+ groups+: [{ -+ name: 'logish_rules', -+ rules: -+ histogramRules('logish_request_duration_seconds', ['job']) + -+ histogramRules('logish_request_duration_seconds', ['job', 'route']) + -+ histogramRules('logish_request_duration_seconds', ['namespace', 'job', 'route']), -+ }, { -+ name: 'logish_frontend_rules', -+ rules: -+ histogramRules('cortex_gw_request_duration_seconds', ['job']) + -+ histogramRules('cortex_gw_request_duration_seconds', ['job', 'route']) + -+ histogramRules('cortex_gw_request_duration_seconds', ['namespace', 'job', 'route']), -+ }, { -+ name: 'promtail_rules', -+ rules: -+ histogramRules('promtail_request_duration_seconds', ['job']) + -+ histogramRules('promtail_request_duration_seconds', ['job', 'status_code']), -+ }], -+ }, -+} -\ No newline at end of file",unknown,"Initial Mixin (#25) - -Signed-off-by: Goutham Veeramachaneni " -93a5a71e621f742b47fdb49d4f11ae6c0ead27b9,2022-09-05 14:51:26,李国忠,"[doc] logql: logql engine support exec vector(0) grama (#7044) - - - -**What this PR does / why we need it**: -logql engine support exec vector(0) grama. 
-new PR of :https://github.com/grafana/loki/pull/7023 - -**Which issue(s) this PR fixes**: -Fixes #6946 - -**Special notes for your reviewer**: -preview -![image](https://user-images.githubusercontent.com/9583245/188355186-9a74d1ce-f062-45e8-8516-52c89383eeec.png) - - -**Checklist** -- [ ] Documentation added -- [ ] Tests updated -- [ ] Is this an important fix or new feature? Add an entry in the `CHANGELOG.md`. -- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/upgrading/_index.md` - -Co-authored-by: Danny Kopping ",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 26938816d84e1..81d0d1328ebaa 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -5,6 +5,7 @@ - #### Loki - - ##### Enhancements -+* [7023](https://github.com/grafana/loki/pull/7023) **liguozhong**: logql engine support exec `vector(0)` grammar. - * [6983](https://github.com/grafana/loki/pull/6983) **slim-bean**: `__timestamp__` and `__line__` are now available in the logql `label_format` query stage. - * [6821](https://github.com/grafana/loki/pull/6821) **kavirajk**: Introduce new cache type `embedded-cache` which is an in-process cache system that runs loki without the need for an external cache (like memcached, redis, etc). It can be run in two modes `distributed: false` (default, and same as old `fifocache`) and `distributed: true` which runs cache in distributed fashion sharding keys across peers if Loki is run in microservices or SSD mode. - * [6691](https://github.com/grafana/loki/pull/6691) **dannykopping**: Update production-ready Loki cluster in docker-compose -diff --git a/docs/sources/logql/metric_queries.md b/docs/sources/logql/metric_queries.md -index a26f668b33f76..2a2d03bdcdf53 100644 ---- a/docs/sources/logql/metric_queries.md -+++ b/docs/sources/logql/metric_queries.md -@@ -121,3 +121,20 @@ The `without` clause removes the listed labels from the resulting vector, keepin - The `by` clause does the opposite, dropping labels that are not listed in the clause, even if their label values are identical between all elements of the vector. - - See [vector aggregation examples](../query_examples/#vector-aggregation-examples) for query examples that use vector aggregation expressions. -+ -+## Functions -+ -+LogQL supports a set of built-in functions. -+ -+- `vector(s scalar)`: returns the scalar s as a vector with no labels. This behaves identically to the [Prometheus `vector()` function](https://prometheus.io/docs/prometheus/latest/querying/functions/#vector). -+ `vector` is mainly used to return a value for a series that would otherwise return nothing; this can be useful when using LogQL to define an alert. -+ -+Examples: -+ -+- Count all the log lines within the last five minutes for the traefik namespace. -+ -+ ```logql -+ sum(count_over_time({namespace=""traefik""}[5m])) # will return nothing -+ or -+ vector(0) # will return 0 -+ ```",unknown,"[doc] logql: logql engine support exec vector(0) grama (#7044) - - - -**What this PR does / why we need it**: -logql engine support exec vector(0) grama. -new PR of :https://github.com/grafana/loki/pull/7023 - -**Which issue(s) this PR fixes**: -Fixes #6946 - -**Special notes for your reviewer**: -preview -![image](https://user-images.githubusercontent.com/9583245/188355186-9a74d1ce-f062-45e8-8516-52c89383eeec.png) - - -**Checklist** -- [ ] Documentation added -- [ ] Tests updated -- [ ] Is this an important fix or new feature? Add an entry in the `CHANGELOG.md`. 
-- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/upgrading/_index.md` - -Co-authored-by: Danny Kopping " -46d3dec010f09158c2ad2c95c1958455f01d0e07,2022-10-13 12:09:11,Joel Verezhak,"[loki-canary] Allow insecure TLS connections (#7398) - -**What this PR does / why we need it**: -This change allows client certificates signed by a self-signed -certificate authority to be used by the Loki canary. - -**Which issue(s) this PR fixes**: -Fixes #4366 - -**Special notes for your reviewer**: -This has been tested on linux amd64 with self-signed certificates. - -**Checklist** -- [x] Reviewed the `CONTRIBUTING.md` guide -- [x] Documentation added -- [x] Tests updated -- [x] `CHANGELOG.md` updated -- [x] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 33d526c0f4797..e5a5224dc126e 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -66,6 +66,7 @@ - #### Fluent Bit - - #### Loki Canary -+* [7398](https://github.com/grafana/loki/pull/7398) **verejoel**: Allow insecure TLS connections - - #### Jsonnet - * [6189](https://github.com/grafana/loki/pull/6189) **irizzant**: Add creation of a `ServiceMonitor` object for Prometheus scraping through configuration parameter `create_service_monitor`. Simplify mixin usage by adding (https://github.com/prometheus-operator/kube-prometheus) library. -diff --git a/cmd/loki-canary/main.go b/cmd/loki-canary/main.go -index 881412a25b8b2..67903e89516bc 100644 ---- a/cmd/loki-canary/main.go -+++ b/cmd/loki-canary/main.go -@@ -53,6 +53,7 @@ func main() { - certFile := flag.String(""cert-file"", """", ""Client PEM encoded X.509 certificate for optional use with TLS connection to Loki"") - keyFile := flag.String(""key-file"", """", ""Client PEM encoded X.509 key for optional use with TLS connection to Loki"") - caFile := flag.String(""ca-file"", """", ""Client certificate authority for optional use with TLS connection to Loki"") -+ insecureSkipVerify := flag.Bool(""insecure"", false, ""Allow insecure TLS connections"") - user := flag.String(""user"", """", ""Loki username."") - pass := flag.String(""pass"", """", ""Loki password. This credential should have both read and write permissions to Loki endpoints"") - tenantID := flag.String(""tenant-id"", """", ""Tenant ID to be set in X-Scope-OrgID header."") -@@ -113,7 +114,7 @@ func main() { - tc.CAFile = *caFile - tc.CertFile = *certFile - tc.KeyFile = *keyFile -- tc.InsecureSkipVerify = false -+ tc.InsecureSkipVerify = *insecureSkipVerify - - var err error - tlsConfig, err = config.NewTLSConfig(&tc) -diff --git a/docs/sources/operations/loki-canary.md b/docs/sources/operations/loki-canary.md -index 051800654828b..1169728f4581c 100644 ---- a/docs/sources/operations/loki-canary.md -+++ b/docs/sources/operations/loki-canary.md -@@ -303,74 +303,75 @@ All options: - - ``` - -addr string -- The Loki server URL:Port, e.g. loki:3100 -+ The Loki server URL:Port, e.g. 
loki:3100 - -buckets int -- Number of buckets in the response_latency histogram (default 10) -+ Number of buckets in the response_latency histogram (default 10) -+ -ca-file string -+ Client certificate authority for optional use with TLS connection to Loki -+ -cert-file string -+ Client PEM encoded X.509 certificate for optional use with TLS connection to Loki -+ -insecure -+ Allow insecure TLS connections - -interval duration -- Duration between log entries (default 1s) -+ Duration between log entries (default 1s) -+ -key-file string -+ Client PEM encoded X.509 key for optional use with TLS connection to Loki - -labelname string -- The label name for this instance of Loki Canary to use in the log selector -- (default ""name"") -+ The label name for this instance of loki-canary to use in the log selector (default ""name"") - -labelvalue string -- The unique label value for this instance of Loki Canary to use in the log selector -- (default ""loki-canary"") -+ The unique label value for this instance of loki-canary to use in the log selector (default ""loki-canary"") -+ -max-wait duration -+ Duration to keep querying Loki for missing websocket entries before reporting them missing (default 5m0s) - -metric-test-interval duration -- The interval the metric test query should be run (default 1h0m0s) -+ The interval the metric test query should be run (default 1h0m0s) - -metric-test-range duration -- The range value [24h] used in the metric test instant-query. This value is truncated -- to the running time of the canary until this value is reached (default 24h0m0s) -+ The range value [24h] used in the metric test instant-query. Note: this value is truncated to the running time of the canary until this value is reached (default 24h0m0s) - -out-of-order-max duration -- Maximum amount of time (in seconds) in the past an out of order entry may have as a -- timestamp. (default 60s) -+ Maximum amount of time to go back for out of order entries (in seconds). (default 1m0s) - -out-of-order-min duration -- Minimum amount of time (in seconds) in the past an out of order entry may have as a -- timestamp. (default 30s) -+ Minimum amount of time to go back for out of order entries (in seconds). (default 30s) - -out-of-order-percentage int -- Percentage (0-100) of log entries that should be sent out of order -+ Percentage (0-100) of log entries that should be sent out of order. - -pass string -- Loki password. This credential should have both read and write permissions to Loki endpoints -+ Loki password. 
This credential should have both read and write permissions to Loki endpoints - -port int -- Port which Loki Canary should expose metrics (default 3500) -+ Port which loki-canary should expose metrics (default 3500) - -pruneinterval duration -- Frequency to check sent versus received logs, and also the frequency at which queries -- for missing logs will be dispatched to Loki, and the frequency spot check queries are run -- (default 1m0s) -+ Frequency to check sent vs received logs, also the frequency which queries for missing logs will be dispatched to loki (default 1m0s) - -push -- Push the logs directly to given Loki address -+ Push the logs directly to given Loki address - -query-timeout duration -- How long to wait for a query response from Loki (default 10s) -+ How long to wait for a query response from Loki (default 10s) - -size int -- Size in bytes of each log line (default 100) -+ Size in bytes of each log line (default 100) -+ -spot-check-initial-wait duration -+ How long should the spot check query wait before starting to check for entries (default 10s) - -spot-check-interval duration -- Interval that a single result will be kept from sent entries and spot-checked against -- Loki. For example, with the 15 minute default, one entry every 15 minutes will be saved, -- and then queried again every 15 minutes until the time defined by spot-check-max is -- reached (default 15m0s) -+ Interval that a single result will be kept from sent entries and spot-checked against Loki, e.g. 15min default one entry every 15 min will be saved and then queried again every 15min until spot-check-max is reached (default 15m0s) - -spot-check-max duration -- How far back to check a spot check an entry before dropping it (default 4h0m0s) -+ How far back to check a spot check entry before dropping it (default 4h0m0s) - -spot-check-query-rate duration -- Interval that Loki Canary will query Loki for the current list of all spot check entries -- (default 1m0s) -+ Interval that the canary will query Loki for the current list of all spot check entries (default 1m0s) - -streamname string -- The stream name for this instance of Loki Canary to use in the log selector -- (default ""stream"") -+ The stream name for this instance of loki-canary to use in the log selector (default ""stream"") - -streamvalue string -- The unique stream value for this instance of Loki Canary to use in the log selector -- (default ""stdout"") -+ The unique stream value for this instance of loki-canary to use in the log selector (default ""stdout"") - -tenant-id string -- Tenant ID to be set in X-Scope-OrgID header. -+ Tenant ID to be set in X-Scope-OrgID header. - -tls -- Does the Loki connection use TLS? -+ Does the loki connection use TLS? - -user string -- Loki user name -+ Loki username. 
- -version -- Print this build's version information -+ Print this builds version information - -wait duration -- Duration to wait for log entries before reporting them as lost (default 1m0s) -+ Duration to wait for log entries on websocket before querying loki for them (default 1m0s) - -write-max-backoff duration - Maximum backoff time between retries (default 5m0s) - -write-max-retries int - Maximum number of retries when push a log entry (default 10) - -write-min-backoff duration - Initial backoff time before first retry (default 500ms) -+ -write-timeout duration -+ How long to wait write response from Loki (default 10s) - ```",unknown,"[loki-canary] Allow insecure TLS connections (#7398) - -**What this PR does / why we need it**: -This change allows client certificates signed by a self-signed -certificate authority to be used by the Loki canary. - -**Which issue(s) this PR fixes**: -Fixes #4366 - -**Special notes for your reviewer**: -This has been tested on linux amd64 with self-signed certificates. - -**Checklist** -- [x] Reviewed the `CONTRIBUTING.md` guide -- [x] Documentation added -- [x] Tests updated -- [x] `CHANGELOG.md` updated -- [x] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`" -f22527f3d91b58c230b8ea1f831b5221060d1bbe,2025-02-10 21:05:55,Paul Rogers,fix(ci): Pass image tag details to logcli docker build (#16159),False,"diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile -index ddccaff2a7813..c35f7fbe07dd3 100644 ---- a/cmd/logcli/Dockerfile -+++ b/cmd/logcli/Dockerfile -@@ -1,9 +1,10 @@ - ARG GO_VERSION=1.23 -+ARG IMAGE_TAG - FROM golang:${GO_VERSION} AS build - - COPY . /src/loki - WORKDIR /src/loki --RUN make clean && make BUILD_IN_CONTAINER=false logcli -+RUN make clean && make BUILD_IN_CONTAINER=false IMAGE_TAG=${IMAGE_TAG} logcli - - - FROM gcr.io/distroless/static:debug",fix,Pass image tag details to logcli docker build (#16159) -d1ae91cea72ba80de744f6ba315cfea525c6924f,2020-02-05 20:22:37,Robert Fratto,ci: pin plugins/manifest image tag (#1637),False,"diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet -index a43b131c28498..1531870334c20 100644 ---- a/.drone/drone.jsonnet -+++ b/.drone/drone.jsonnet -@@ -113,7 +113,7 @@ local manifest(apps) = pipeline('manifest') { - steps: std.foldl( - function(acc, app) acc + [{ - name: 'manifest-' + app, -- image: 'plugins/manifest', -+ image: 'plugins/manifest:1.2.3', - settings: { - // the target parameter is abused for the app's name, - // as it is unused in spec mode. 
See docker-manifest.tmpl -diff --git a/.drone/drone.yml b/.drone/drone.yml -index d159080238a81..46251c351c50e 100644 ---- a/.drone/drone.yml -+++ b/.drone/drone.yml -@@ -504,7 +504,7 @@ platform: - - steps: - - name: manifest-promtail -- image: plugins/manifest -+ image: plugins/manifest:1.2.3 - settings: - password: - from_secret: docker_password -@@ -516,7 +516,7 @@ steps: - - clone - - - name: manifest-loki -- image: plugins/manifest -+ image: plugins/manifest:1.2.3 - settings: - password: - from_secret: docker_password -@@ -529,7 +529,7 @@ steps: - - manifest-promtail - - - name: manifest-loki-canary -- image: plugins/manifest -+ image: plugins/manifest:1.2.3 - settings: - password: - from_secret: docker_password",ci,pin plugins/manifest image tag (#1637) -0b629147ce591d598ff51d3c4b9bd9acb6510bcb,2019-11-05 22:58:34,Wojtek,Fixed whitespaces in example ingress yaml (#1082),False,"diff --git a/docs/installation/helm.md b/docs/installation/helm.md -index 5cf2e711ea83f..0f3a42cab3f22 100644 ---- a/docs/installation/helm.md -+++ b/docs/installation/helm.md -@@ -95,21 +95,21 @@ Sample Helm template for Ingress: - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: --annotations: -+ annotations: - kubernetes.io/ingress.class: {{ .Values.ingress.class }} - ingress.kubernetes.io/auth-type: ""basic"" - ingress.kubernetes.io/auth-secret: {{ .Values.ingress.basic.secret }} --name: loki -+ name: loki - spec: --rules: --- host: {{ .Values.ingress.host }} -+ rules: -+ - host: {{ .Values.ingress.host }} - http: -- paths: -- - backend: -- serviceName: loki -- servicePort: 3100 --tls: --- secretName: {{ .Values.ingress.cert }} -+ paths: -+ - backend: -+ serviceName: loki -+ servicePort: 3100 -+ tls: -+ - secretName: {{ .Values.ingress.cert }} - hosts: - - {{ .Values.ingress.host }} - ```",unknown,Fixed whitespaces in example ingress yaml (#1082) -2ac409c23e834474b89f7f0974a859c271219e7d,2021-11-04 19:24:08,Ed Welch,"Build: simplify how protos are built (#4639) - -* we always seem to be chasing our tails with how protos are generated and Makes use of timestamps to determine if files should be recompiled. Instead of touching files and altering timestamps always delete the compiled proto files when calling `protos` to make sure they are compiled every time. - -Removed targets to build protos, yaccs, and ragel files when trying to build loki or the canary. This isn't necesary, if you are changing these files you would know you need to build them and the `check-generated-files` should catch any changes to them not committed. - -* rm -rf - -* we have a race between check-generated-files and our other steps, so let that run first. 
- -also removing `check-generated-files` from the `all` target because its redundant with a separate step and could also race with the parallel lint - -* remove TOUCH_PROTOS - -* more cleanup of TOUCH_PROTOS",False,"diff --git a/.circleci/config.yml b/.circleci/config.yml -index 90a3e4c2604bd..f54f68ca03777 100644 ---- a/.circleci/config.yml -+++ b/.circleci/config.yml -@@ -55,9 +55,6 @@ jobs: - steps: - - checkout - - setup_remote_docker -- - run: -- name: touch-protos -- command: make touch-protos - - run: - name: build - command: make GOOS=windows GOGC=10 promtail -diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet -index b2efdbe2323bd..eca7e77caf4c7 100644 ---- a/.drone/drone.jsonnet -+++ b/.drone/drone.jsonnet -@@ -210,7 +210,6 @@ local promtail(arch) = pipeline('promtail-' + arch) + arch_image(arch) { - when: condition('exclude').tagMain, - settings+: { - dry_run: true, -- build_args: ['TOUCH_PROTOS=1'], - }, - }, - ] + [ -@@ -218,9 +217,7 @@ local promtail(arch) = pipeline('promtail-' + arch) + arch_image(arch) { - clients_docker(arch, 'promtail') { - depends_on: ['image-tag'], - when: condition('include').tagMain, -- settings+: { -- build_args: ['TOUCH_PROTOS=1'], -- }, -+ settings+: {}, - }, - ], - depends_on: ['check'], -@@ -249,9 +246,7 @@ local lambda_promtail(tags='') = pipeline('lambda-promtail'){ - lambda_promtail_ecr('lambda-promtail') { - depends_on: ['image-tag'], - when: condition('include').tagMain, -- settings+: { -- build_args: ['TOUCH_PROTOS=1'], -- }, -+ settings+: {}, - }, - ], - depends_on: ['check'], -@@ -265,7 +260,6 @@ local multiarch_image(arch) = pipeline('docker-' + arch) + arch_image(arch) { - when: condition('exclude').tagMain, - settings+: { - dry_run: true, -- build_args: ['TOUCH_PROTOS=1'], - }, - } - for app in apps -@@ -274,9 +268,7 @@ local multiarch_image(arch) = pipeline('docker-' + arch) + arch_image(arch) { - docker(arch, app) { - depends_on: ['image-tag'], - when: condition('include').tagMain, -- settings+: { -- build_args: ['TOUCH_PROTOS=1'], -- }, -+ settings+: {}, - } - for app in apps - ], -@@ -323,9 +315,9 @@ local manifest(apps) = pipeline('manifest') { - path: 'loki', - }, - steps: [ -- make('test', container=false) { depends_on: ['clone'] }, -- make('lint', container=false) { depends_on: ['clone'] }, - make('check-generated-files', container=false) { depends_on: ['clone'] }, -+ make('test', container=false) { depends_on: ['clone','check-generated-files'] }, -+ make('lint', container=false) { depends_on: ['clone','check-generated-files'] }, - make('check-mod', container=false) { depends_on: ['clone', 'test', 'lint'] }, - { - name: 'shellcheck', -diff --git a/.drone/drone.yml b/.drone/drone.yml -index add72a6c397d7..3cbb6adbdbb1d 100644 ---- a/.drone/drone.yml -+++ b/.drone/drone.yml -@@ -3,23 +3,25 @@ kind: pipeline - name: check - steps: - - commands: -- - make BUILD_IN_CONTAINER=false test -+ - make BUILD_IN_CONTAINER=false check-generated-files - depends_on: - - clone - image: grafana/loki-build-image:0.18.0 -- name: test -+ name: check-generated-files - - commands: -- - make BUILD_IN_CONTAINER=false lint -+ - make BUILD_IN_CONTAINER=false test - depends_on: - - clone -+ - check-generated-files - image: grafana/loki-build-image:0.18.0 -- name: lint -+ name: test - - commands: -- - make BUILD_IN_CONTAINER=false check-generated-files -+ - make BUILD_IN_CONTAINER=false lint - depends_on: - - clone -+ - check-generated-files - image: grafana/loki-build-image:0.18.0 -- name: check-generated-files -+ name: lint - - commands: - - make 
BUILD_IN_CONTAINER=false check-mod - depends_on: -@@ -73,8 +75,6 @@ steps: - image: plugins/docker - name: build-loki-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki/Dockerfile - dry_run: true - password: -@@ -93,8 +93,6 @@ steps: - image: plugins/docker - name: build-loki-canary-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki-canary/Dockerfile - dry_run: true - password: -@@ -113,8 +111,6 @@ steps: - image: plugins/docker - name: build-logcli-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/logcli/Dockerfile - dry_run: true - password: -@@ -133,8 +129,6 @@ steps: - image: plugins/docker - name: publish-loki-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki/Dockerfile - dry_run: false - password: -@@ -153,8 +147,6 @@ steps: - image: plugins/docker - name: publish-loki-canary-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki-canary/Dockerfile - dry_run: false - password: -@@ -173,8 +165,6 @@ steps: - image: plugins/docker - name: publish-logcli-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/logcli/Dockerfile - dry_run: false - password: -@@ -208,8 +198,6 @@ steps: - image: plugins/docker - name: build-loki-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki/Dockerfile - dry_run: true - password: -@@ -228,8 +216,6 @@ steps: - image: plugins/docker - name: build-loki-canary-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki-canary/Dockerfile - dry_run: true - password: -@@ -248,8 +234,6 @@ steps: - image: plugins/docker - name: build-logcli-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/logcli/Dockerfile - dry_run: true - password: -@@ -268,8 +252,6 @@ steps: - image: plugins/docker - name: publish-loki-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki/Dockerfile - dry_run: false - password: -@@ -288,8 +270,6 @@ steps: - image: plugins/docker - name: publish-loki-canary-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki-canary/Dockerfile - dry_run: false - password: -@@ -308,8 +288,6 @@ steps: - image: plugins/docker - name: publish-logcli-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/logcli/Dockerfile - dry_run: false - password: -@@ -343,8 +321,6 @@ steps: - image: plugins/docker - name: build-loki-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki/Dockerfile - dry_run: true - password: -@@ -363,8 +339,6 @@ steps: - image: plugins/docker - name: build-loki-canary-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki-canary/Dockerfile - dry_run: true - password: -@@ -383,8 +357,6 @@ steps: - image: plugins/docker - name: build-logcli-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/logcli/Dockerfile - dry_run: true - password: -@@ -403,8 +375,6 @@ steps: - image: plugins/docker - name: publish-loki-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki/Dockerfile - dry_run: false - password: -@@ -423,8 +393,6 @@ steps: - image: plugins/docker - name: publish-loki-canary-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/loki-canary/Dockerfile - dry_run: false - password: -@@ -443,8 +411,6 @@ steps: - image: plugins/docker - name: publish-logcli-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: cmd/logcli/Dockerfile - dry_run: false - password: -@@ -478,8 +444,6 @@ 
steps: - image: plugins/docker - name: build-promtail-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: clients/cmd/promtail/Dockerfile - dry_run: true - password: -@@ -498,8 +462,6 @@ steps: - image: plugins/docker - name: publish-promtail-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: clients/cmd/promtail/Dockerfile - dry_run: false - password: -@@ -533,8 +495,6 @@ steps: - image: plugins/docker - name: build-promtail-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: clients/cmd/promtail/Dockerfile - dry_run: true - password: -@@ -553,8 +513,6 @@ steps: - image: plugins/docker - name: publish-promtail-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: clients/cmd/promtail/Dockerfile - dry_run: false - password: -@@ -588,8 +546,6 @@ steps: - image: plugins/docker - name: build-promtail-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: clients/cmd/promtail/Dockerfile.arm32 - dry_run: true - password: -@@ -608,8 +564,6 @@ steps: - image: plugins/docker - name: publish-promtail-image - settings: -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: clients/cmd/promtail/Dockerfile.arm32 - dry_run: false - password: -@@ -925,8 +879,6 @@ steps: - settings: - access_key: - from_secret: ecr_key -- build_args: -- - TOUCH_PROTOS=1 - dockerfile: tools/lambda-promtail/Dockerfile - dry_run: false - region: us-east-1 -@@ -984,6 +936,6 @@ kind: secret - name: deploy_config - --- - kind: signature --hmac: 55440faa2728a5b8abbd213c2cf198e01f00201ba7143391924da1b9aa02c350 -+hmac: b51ec8dfc84d0be83827fc851b21b81e1091886be480d675f51485b647e58001 - - ... -diff --git a/Makefile b/Makefile -index 0b9c510017701..a6ea61fecff53 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,5 +1,5 @@ - .DEFAULT_GOAL := all --.PHONY: all images check-generated-files logcli loki loki-debug promtail promtail-debug loki-canary lint test clean yacc protos touch-protobuf-sources touch-protos format -+.PHONY: all images check-generated-files logcli loki loki-debug promtail promtail-debug loki-canary lint test clean yacc protos touch-protobuf-sources format - .PHONY: docker-driver docker-driver-clean docker-driver-enable docker-driver-push - .PHONY: fluent-bit-image, fluent-bit-push, fluent-bit-test - .PHONY: fluentd-image, fluentd-push, fluentd-test -@@ -8,6 +8,7 @@ - .PHONY: benchmark-store, drone, check-mod - .PHONY: migrate migrate-image lint-markdown ragel - .PHONY: validate-example-configs generate-example-config-doc check-example-config-doc -+.PHONY: clean clean-protos - - SHELL = /usr/bin/env bash - -@@ -128,10 +129,10 @@ binfmt: - ################ - # Main Targets # - ################ --all: promtail logcli loki loki-canary check-generated-files -+all: promtail logcli loki loki-canary - - # This is really a check for the CI to make sure generated files are built and checked in manually --check-generated-files: touch-protobuf-sources yacc ragel protos clients/pkg/promtail/server/ui/assets_vfsdata.go -+check-generated-files: yacc ragel protos clients/pkg/promtail/server/ui/assets_vfsdata.go - @if ! 
(git diff --exit-code $(YACC_GOS) $(RAGEL_GOS) $(PROTO_GOS) $(PROMTAIL_GENERATED_FILE)); then \ - echo ""\nChanges found in generated files""; \ - echo ""Run 'make check-generated-files' and commit the changes to fix this error.""; \ -@@ -140,14 +141,6 @@ check-generated-files: touch-protobuf-sources yacc ragel protos clients/pkg/prom - exit 1; \ - fi - --# Trick used to ensure that protobuf files are always compiled even if not changed, because the --# tooling may have been upgraded and the compiled output may be different. We're not using a --# PHONY target so that we can control where we want to touch it. --touch-protobuf-sources: -- for def in $(PROTO_DEFS); do \ -- touch $$def; \ -- done -- - ########## - # Logcli # - ########## -@@ -165,8 +158,8 @@ cmd/logcli/logcli: $(APP_GO_FILES) cmd/logcli/main.go - # Loki # - ######## - --loki: protos yacc ragel cmd/loki/loki --loki-debug: protos yacc ragel cmd/loki/loki-debug -+loki: cmd/loki/loki -+loki-debug: cmd/loki/loki-debug - - cmd/loki/loki: $(APP_GO_FILES) cmd/loki/main.go - CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D) -@@ -180,7 +173,7 @@ cmd/loki/loki-debug: $(APP_GO_FILES) cmd/loki/main.go - # Loki-Canary # - ############### - --loki-canary: protos yacc ragel cmd/loki-canary/loki-canary -+loki-canary: cmd/loki-canary/loki-canary - - cmd/loki-canary/loki-canary: $(APP_GO_FILES) cmd/loki-canary/main.go - CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D) -@@ -281,6 +274,9 @@ test: all - # Clean # - ######### - -+clean-protos: -+ rm -rf $(PROTO_GOS) -+ - clean: - rm -rf clients/cmd/promtail/promtail - rm -rf cmd/loki/loki -@@ -340,13 +336,9 @@ endif - # Protobufs # - ############# - --protos: $(PROTO_GOS) -- --# use with care. This signals to make that the proto definitions don't need recompiling. --touch-protos: -- for proto in $(PROTO_GOS); do [ -f ""./$${proto}"" ] && touch ""$${proto}"" && echo ""touched $${proto}""; done -+protos: clean-protos $(PROTO_GOS) - --%.pb.go: $(PROTO_DEFS) -+%.pb.go: - ifeq ($(BUILD_IN_CONTAINER),true) - @mkdir -p $(shell pwd)/.pkg - @mkdir -p $(shell pwd)/.cache -diff --git a/clients/cmd/promtail/Dockerfile b/clients/cmd/promtail/Dockerfile -index 726d7366ff3b0..1d037f7c39001 100644 ---- a/clients/cmd/promtail/Dockerfile -+++ b/clients/cmd/promtail/Dockerfile -@@ -1,13 +1,11 @@ - FROM golang:1.17.2-bullseye as build --# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. --# This is helpful when file system timestamps can't be trusted with make --ARG TOUCH_PROTOS -+ - COPY . /src/loki - WORKDIR /src/loki - # Backports repo required to get a libsystemd version 246 or newer which is required to handle journal +ZSTD compression - RUN echo ""deb http://deb.debian.org/debian bullseye-backports main"" >> /etc/apt/sources.list - RUN apt-get update && apt-get install -t bullseye-backports -qy libsystemd-dev --RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false promtail -+RUN make clean && make BUILD_IN_CONTAINER=false promtail - - # Promtail requires debian as the base image to support systemd journal reading - FROM debian:bullseye-slim -diff --git a/clients/cmd/promtail/Dockerfile.arm32 b/clients/cmd/promtail/Dockerfile.arm32 -index 0a5e8c7590907..a0da39364c117 100644 ---- a/clients/cmd/promtail/Dockerfile.arm32 -+++ b/clients/cmd/promtail/Dockerfile.arm32 -@@ -1,11 +1,9 @@ - FROM golang:1.17.2 as build --# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. 
--# This is helpful when file system timestamps can't be trusted with make --ARG TOUCH_PROTOS -+ - COPY . /src/loki - WORKDIR /src/loki - RUN apt-get update && apt-get install -qy libsystemd-dev --RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false promtail -+RUN make clean && make BUILD_IN_CONTAINER=false promtail - - # Promtail requires debian as the base image to support systemd journal reading - FROM debian:stretch-slim -diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile -index d804f41128099..70598177de1aa 100644 ---- a/cmd/logcli/Dockerfile -+++ b/cmd/logcli/Dockerfile -@@ -1,9 +1,8 @@ - FROM golang:1.17.2 as build - --ARG TOUCH_PROTOS - COPY . /src/loki - WORKDIR /src/loki --RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false logcli -+RUN make clean && make BUILD_IN_CONTAINER=false logcli - - FROM alpine:3.13 - -diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile -index e5aa0b6a8c805..7faa6450efa9c 100644 ---- a/cmd/loki-canary/Dockerfile -+++ b/cmd/loki-canary/Dockerfile -@@ -1,10 +1,8 @@ - FROM golang:1.17.2 as build --# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. --# This is helpful when file system timestamps can't be trusted with make --ARG TOUCH_PROTOS -+ - COPY . /src/loki - WORKDIR /src/loki --RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false loki-canary -+RUN make clean && make BUILD_IN_CONTAINER=false loki-canary - - FROM alpine:3.13 - RUN apk add --update --no-cache ca-certificates -diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile -index 81b69d4ef4c82..0fad55ff9bcf4 100644 ---- a/cmd/loki/Dockerfile -+++ b/cmd/loki/Dockerfile -@@ -1,10 +1,8 @@ - FROM golang:1.17.2 as build --# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. --# This is helpful when file system timestamps can't be trusted with make --ARG TOUCH_PROTOS -+ - COPY . /src/loki - WORKDIR /src/loki --RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false loki -+RUN make clean && make BUILD_IN_CONTAINER=false loki - - FROM alpine:3.13 - -diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile -index ec873807529aa..61e328e0a5d20 100644 ---- a/cmd/querytee/Dockerfile -+++ b/cmd/querytee/Dockerfile -@@ -1,6 +1,5 @@ - FROM golang:1.17.2 as build - --ARG TOUCH_PROTOS - COPY . /src/loki - WORKDIR /src/loki - RUN make clean && make BUILD_IN_CONTAINER=false loki-querytee",Build,"simplify how protos are built (#4639) - -* we always seem to be chasing our tails with how protos are generated and Makes use of timestamps to determine if files should be recompiled. Instead of touching files and altering timestamps always delete the compiled proto files when calling `protos` to make sure they are compiled every time. - -Removed targets to build protos, yaccs, and ragel files when trying to build loki or the canary. This isn't necesary, if you are changing these files you would know you need to build them and the `check-generated-files` should catch any changes to them not committed. - -* rm -rf - -* we have a race between check-generated-files and our other steps, so let that run first. 
- -also removing `check-generated-files` from the `all` target because its redundant with a separate step and could also race with the parallel lint - -* remove TOUCH_PROTOS - -* more cleanup of TOUCH_PROTOS" -a205dce83600a874da5a02176217e333662cbf01,2023-05-22 21:26:42,Andreas Gebhardt,"chunks-inspect: print chunk version (format) (#9490) - -Get the version byte of the current inspecting Loki chunk printed.",False,"diff --git a/cmd/chunks-inspect/loki.go b/cmd/chunks-inspect/loki.go -index 35bb90774a2cb..d8fd5d0a913fd 100644 ---- a/cmd/chunks-inspect/loki.go -+++ b/cmd/chunks-inspect/loki.go -@@ -62,6 +62,7 @@ const ( - ) - - type LokiChunk struct { -+ format byte - encoding Encoding - - blocks []LokiBlock -@@ -149,6 +150,7 @@ func parseLokiChunk(chunkHeader *ChunkHeader, r io.Reader) (*LokiChunk, error) { - metadata = metadata[n:] - - lokiChunk := &LokiChunk{ -+ format: f, - encoding: compression, - metadataChecksum: metaChecksum, - computedMetadataChecksum: computedMetaChecksum, -diff --git a/cmd/chunks-inspect/main.go b/cmd/chunks-inspect/main.go -index 0aa99a57f1249..c25f621845b06 100644 ---- a/cmd/chunks-inspect/main.go -+++ b/cmd/chunks-inspect/main.go -@@ -65,6 +65,7 @@ func printFile(filename string, blockDetails, printLines, storeBlocks bool) { - return - } - -+ fmt.Println(""Format (Version):"", lokiChunk.format) - fmt.Println(""Encoding:"", lokiChunk.encoding) - fmt.Print(""Blocks Metadata Checksum: "", fmt.Sprintf(""%08x"", lokiChunk.metadataChecksum)) - if lokiChunk.metadataChecksum == lokiChunk.computedMetadataChecksum {",unknown,"chunks-inspect: print chunk version (format) (#9490) - -Get the version byte of the current inspecting Loki chunk printed." -b1d4efab1203adf5d110261f12171fc03148ebbe,2022-10-06 01:17:00,Dylan Guedes,"Loki: Per-tenant stream sharding (#7311) - -**What this PR does / why we need it**: -- Move stream sharding configuration to its own package to avoid cyclic -imports -- Change stream sharding to be a per-tenant configuration -- Change ingesters to reject whole streams due to rate-limit based on -per-tenant stream sharding -- Change stream sharding flags prefix from `distributor.shard-stream` to -`shard-stream`",False,"diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md -index 5d1b71bdf5261..70d28b98fd079 100644 ---- a/docs/sources/configuration/_index.md -+++ b/docs/sources/configuration/_index.md -@@ -318,28 +318,6 @@ ring: - # reading and writing. - # CLI flag: -distributor.ring.heartbeat-timeout - [heartbeat_timeout: | default = 1m] -- --# Configures the distributor to shard streams that are too big --shard_streams: -- # Whether to enable stream sharding -- # -- # CLI flag: -distributor.shard-streams.enabled -- [enabled: | default = false] -- -- # Enable logging when sharding streams because logging on the read path may -- # impact performance. When disabled, stream sharding will emit no logs -- # regardless of log level -- # -- # CLI flag: -distributor.shard-streams.logging-enabled -- [logging_enabled: | default = false] -- -- # Threshold that determines how much the stream should be sharded. -- # The formula used is n = ceil(stream size + ingested rate / desired rate), where n is the number of shards. -- # For instance, if a stream ingestion is at 10MB, desired rate is 3MB (default), and a stream of size 1MB is -- # received, the given stream will be split into n = ceil((1 + 10)/3) = 4 shards. 
-- # -- # CLI flag: -distributor.shard-streams.desired-rate -- [desired_rate: | default = 3MB] - ``` - - ## querier -@@ -2364,6 +2342,28 @@ The `limits_config` block configures global and per-tenant limits in Loki. - # CLI flag: -ingester.per-stream-rate-limit-burst - [per_stream_rate_limit_burst: | default = ""15MB""] - -+# Configures the distributor to shard streams that are too big -+shard_streams: -+ # Whether to enable stream sharding -+ # -+ # CLI flag: -shard-streams.enabled -+ [enabled: | default = false] -+ -+ # Enable logging when sharding streams because logging on the read path may -+ # impact performance. When disabled, stream sharding will emit no logs -+ # regardless of log level -+ # -+ # CLI flag: -shard-streams.logging-enabled -+ [logging_enabled: | default = false] -+ -+ # Threshold that determines how much the stream should be sharded. -+ # The formula used is n = ceil(stream size + ingested rate / desired rate), where n is the number of shards. -+ # For instance, if a stream ingestion is at 10MB, desired rate is 3MB (default), and a stream of size 1MB is -+ # received, the given stream will be split into n = ceil((1 + 10)/3) = 4 shards. -+ # -+ # CLI flag: -shard-streams.desired-rate -+ [desired_rate: | default = 3MB] -+ - # Limit how far back in time series data and metadata can be queried, - # up until lookback duration ago. - # This limit is enforced in the query frontend, the querier and the ruler. -diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index ef95af7c6f3e2..f75fdf5a957f7 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -30,6 +30,7 @@ import ( - ""go.uber.org/atomic"" - - ""github.com/grafana/loki/pkg/distributor/clientpool"" -+ ""github.com/grafana/loki/pkg/distributor/shardstreams"" - ""github.com/grafana/loki/pkg/ingester/client"" - ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/logql/syntax"" -@@ -37,7 +38,6 @@ import ( - ""github.com/grafana/loki/pkg/storage/stores/indexshipper/compactor/retention"" - ""github.com/grafana/loki/pkg/usagestats"" - ""github.com/grafana/loki/pkg/util"" -- ""github.com/grafana/loki/pkg/util/flagext"" - util_log ""github.com/grafana/loki/pkg/util/log"" - ""github.com/grafana/loki/pkg/validation"" - ) -@@ -51,21 +51,6 @@ var ( - rfStats = usagestats.NewInt(""distributor_replication_factor"") - ) - --type ShardStreamsConfig struct { -- Enabled bool `yaml:""enabled""` -- LoggingEnabled bool `yaml:""logging_enabled""` -- -- // DesiredRate is the threshold used to shard the stream into smaller pieces. -- // Expected to be in bytes. -- DesiredRate flagext.ByteSize `yaml:""desired_rate""` --} -- --func (cfg *ShardStreamsConfig) RegisterFlagsWithPrefix(prefix string, fs *flag.FlagSet) { -- fs.BoolVar(&cfg.Enabled, prefix+"".enabled"", false, ""Automatically shard streams to keep them under the per-stream rate limit"") -- fs.BoolVar(&cfg.LoggingEnabled, prefix+"".logging-enabled"", false, ""Enable logging when sharding streams"") -- fs.Var(&cfg.DesiredRate, prefix+"".desired-rate"", ""threshold used to cut a new shard. Default (3MB) means if a rate is above 3MB, it will be sharded."") --} -- - // Config for a Distributor. - type Config struct { - // Distributors ring -@@ -73,15 +58,11 @@ type Config struct { - - // For testing. - factory ring_client.PoolFactory `yaml:""-""` -- -- // ShardStreams configures wether big streams should be sharded or not. 
-- ShardStreams ShardStreamsConfig `yaml:""shard_streams""` - } - - // RegisterFlags registers distributor-related flags. - func (cfg *Config) RegisterFlags(fs *flag.FlagSet) { - cfg.DistributorRing.RegisterFlags(fs) -- cfg.ShardStreams.RegisterFlagsWithPrefix(""distributor.shard-streams"", fs) - } - - // RateStore manages the ingestion rate of streams, populated by data fetched from ingesters. -@@ -329,7 +310,8 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log - } - stream.Entries = stream.Entries[:n] - -- if d.cfg.ShardStreams.Enabled { -+ shardStreamsCfg := d.validator.Limits.ShardStreams(userID) -+ if shardStreamsCfg.Enabled { - derivedKeys, derivedStreams := d.shardStream(stream, streamSize, userID) - keys = append(keys, derivedKeys...) - streams = append(streams, derivedStreams...) -@@ -409,15 +391,15 @@ func min(x1, x2 int) int { - // N is the sharding size for the given stream. shardSteam returns the smaller - // streams and their associated keys for hashing to ingesters. - func (d *Distributor) shardStream(stream logproto.Stream, streamSize int, userID string) ([]uint32, []streamTracker) { -+ shardStreamsCfg := d.validator.Limits.ShardStreams(userID) - logger := log.With(util_log.WithUserID(userID, util_log.Logger), ""stream"", stream.Labels) -- -- shardCount := d.shardCountFor(logger, &stream, streamSize, d.cfg.ShardStreams.DesiredRate.Val(), d.rateStore) -+ shardCount := d.shardCountFor(logger, &stream, streamSize, d.rateStore, shardStreamsCfg) - - if shardCount <= 1 { - return []uint32{util.TokenFor(userID, stream.Labels)}, []streamTracker{{stream: stream}} - } - -- if d.cfg.ShardStreams.LoggingEnabled { -+ if shardStreamsCfg.LoggingEnabled { - level.Info(logger).Log(""msg"", ""sharding request"", ""shard_count"", shardCount) - } - -@@ -427,7 +409,7 @@ func (d *Distributor) shardStream(stream logproto.Stream, streamSize int, userID - derivedKeys := make([]uint32, 0, shardCount) - derivedStreams := make([]streamTracker, 0, shardCount) - for i := 0; i < shardCount; i++ { -- shard, ok := d.createShard(stream, streamLabels, streamPattern, shardCount, i) -+ shard, ok := d.createShard(shardStreamsCfg, stream, streamLabels, streamPattern, shardCount, i) - if !ok { - level.Error(logger).Log(""msg"", ""couldn't create shard"", ""idx"", i) - continue -@@ -436,7 +418,7 @@ func (d *Distributor) shardStream(stream logproto.Stream, streamSize int, userID - derivedKeys = append(derivedKeys, util.TokenFor(userID, shard.Labels)) - derivedStreams = append(derivedStreams, streamTracker{stream: shard}) - -- if d.cfg.ShardStreams.LoggingEnabled { -+ if shardStreamsCfg.LoggingEnabled { - level.Info(util_log.Logger).Log(""msg"", ""stream derived from sharding"", ""src-stream"", stream.Labels, ""derived-stream"", shard.Labels) - } - } -@@ -460,8 +442,8 @@ func labelTemplate(lbls string) labels.Labels { - return streamLabels - } - --func (d *Distributor) createShard(stream logproto.Stream, lbls labels.Labels, streamPattern string, totalShards, shardNumber int) (logproto.Stream, bool) { -- lowerBound, upperBound, ok := d.boundsFor(stream, totalShards, shardNumber) -+func (d *Distributor) createShard(shardStreamsCfg *shardstreams.Config, stream logproto.Stream, lbls labels.Labels, streamPattern string, totalShards, shardNumber int) (logproto.Stream, bool) { -+ lowerBound, upperBound, ok := d.boundsFor(stream, totalShards, shardNumber, shardStreamsCfg.LoggingEnabled) - if !ok { - return logproto.Stream{}, false - } -@@ -475,7 +457,7 @@ func (d *Distributor) 
createShard(stream logproto.Stream, lbls labels.Labels, st - }, true - } - --func (d *Distributor) boundsFor(stream logproto.Stream, totalShards, shardNumber int) (int, int, bool) { -+func (d *Distributor) boundsFor(stream logproto.Stream, totalShards, shardNumber int, loggingEnabled bool) (int, int, bool) { - entriesPerWindow := float64(len(stream.Entries)) / float64(totalShards) - - fIdx := float64(shardNumber) -@@ -483,7 +465,7 @@ func (d *Distributor) boundsFor(stream logproto.Stream, totalShards, shardNumber - upperBound := min(int(entriesPerWindow*(1+fIdx)), len(stream.Entries)) - - if lowerBound > upperBound { -- if d.cfg.ShardStreams.LoggingEnabled { -+ if loggingEnabled { - level.Warn(util_log.Logger).Log(""msg"", ""sharding with lowerbound > upperbound"", ""lowerbound"", lowerBound, ""upperbound"", upperBound, ""shards"", totalShards, ""labels"", stream.Labels) - } - return 0, 0, false -@@ -598,10 +580,10 @@ func (d *Distributor) parseStreamLabels(vContext validationContext, key string, - // based on the rate stored in the rate store and will store the new evaluated number of shards. - // - // desiredRate is expected to be given in bytes. --func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, streamSize, desiredRate int, rateStore RateStore) int { -- if desiredRate <= 0 { -- if d.cfg.ShardStreams.LoggingEnabled { -- level.Error(logger).Log(""msg"", ""invalid desired rate"", ""desired_rate"", desiredRate) -+func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, streamSize int, rateStore RateStore, streamShardcfg *shardstreams.Config) int { -+ if streamShardcfg.DesiredRate.Val() <= 0 { -+ if streamShardcfg.LoggingEnabled { -+ level.Error(logger).Log(""msg"", ""invalid desired rate"", ""desired_rate"", streamShardcfg.DesiredRate.String()) - } - return 1 - } -@@ -609,16 +591,16 @@ func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, - rate, err := rateStore.RateFor(stream) - if err != nil { - d.streamShardingFailures.WithLabelValues(""rate_not_found"").Inc() -- if d.cfg.ShardStreams.LoggingEnabled { -+ if streamShardcfg.LoggingEnabled { - level.Error(logger).Log(""msg"", ""couldn't shard stream because rate store returned error"", ""err"", err) - } - return 1 - } - -- shards := calculateShards(rate, streamSize, desiredRate) -+ shards := calculateShards(rate, streamSize, streamShardcfg.DesiredRate.Val()) - if shards > len(stream.Entries) { - d.streamShardingFailures.WithLabelValues(""too_many_shards"").Inc() -- if d.cfg.ShardStreams.LoggingEnabled { -+ if streamShardcfg.LoggingEnabled { - level.Error(logger).Log(""msg"", ""number of shards bigger than number of entries"", ""shards"", shards, ""entries"", len(stream.Entries)) - } - return len(stream.Entries) -diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go -index 9dbf4132e6d63..e802b20215233 100644 ---- a/pkg/distributor/distributor_test.go -+++ b/pkg/distributor/distributor_test.go -@@ -672,11 +672,21 @@ func TestStreamShard(t *testing.T) { - t.Run(tc.name, func(t *testing.T) { - baseStream.Entries = tc.entries - -+ distributorLimits := &validation.Limits{} -+ flagext.DefaultValues(distributorLimits) -+ distributorLimits.ShardStreams.DesiredRate = desiredRate -+ -+ overrides, err := validation.NewOverrides(*distributorLimits, nil) -+ require.NoError(t, err) -+ -+ validator, err := NewValidator(overrides) -+ require.NoError(t, err) -+ - d := Distributor{ - rateStore: &noopRateStore{}, - streamShardingFailures: 
shardingFailureMetric, -+ validator: validator, - } -- d.cfg.ShardStreams.DesiredRate = desiredRate - - _, derivedStreams := d.shardStream(baseStream, tc.streamSize, ""fake"") - require.Equal(t, tc.wantDerivedStream, derivedStreams) -@@ -865,21 +875,12 @@ func TestShardCountFor(t *testing.T) { - name string - stream *logproto.Stream - rate int -- desiredRate int -+ desiredRate loki_flagext.ByteSize - - wantStreamSize int // used for sanity check. - wantShards int - wantErr bool - }{ -- { -- name: ""2 entries with zero rate and desired rate < 0, return 1 shard"", -- stream: &logproto.Stream{Hash: 1}, -- rate: 0, -- desiredRate: -5, // in bytes -- wantStreamSize: 2, // in bytes -- wantShards: 1, -- wantErr: false, -- }, - { - name: ""2 entries with zero rate and desired rate == 0, return 1 shard"", - stream: &logproto.Stream{Hash: 1}, -@@ -953,11 +954,12 @@ func TestShardCountFor(t *testing.T) { - limits := &validation.Limits{} - flagext.DefaultValues(limits) - limits.EnforceMetricName = false -+ limits.ShardStreams.DesiredRate = tc.desiredRate - - d := &Distributor{ - streamShardingFailures: shardingFailureMetric, - } -- got := d.shardCountFor(util_log.Logger, tc.stream, tc.wantStreamSize, tc.desiredRate, &noopRateStore{tc.rate}) -+ got := d.shardCountFor(util_log.Logger, tc.stream, tc.wantStreamSize, &noopRateStore{tc.rate}, limits.ShardStreams) - require.Equal(t, tc.wantShards, got) - }) - } -diff --git a/pkg/distributor/limits.go b/pkg/distributor/limits.go -index 9cff9c140140f..7b6fa23d7287d 100644 ---- a/pkg/distributor/limits.go -+++ b/pkg/distributor/limits.go -@@ -1,6 +1,10 @@ - package distributor - --import ""time"" -+import ( -+ ""time"" -+ -+ ""github.com/grafana/loki/pkg/distributor/shardstreams"" -+) - - // Limits is an interface for distributor limits/related configs - type Limits interface { -@@ -16,4 +20,6 @@ type Limits interface { - RejectOldSamplesMaxAge(userID string) time.Duration - - IncrementDuplicateTimestamps(userID string) bool -+ -+ ShardStreams(userID string) *shardstreams.Config - } -diff --git a/pkg/distributor/shardstreams/config.go b/pkg/distributor/shardstreams/config.go -new file mode 100644 -index 0000000000000..6a92472451543 ---- /dev/null -+++ b/pkg/distributor/shardstreams/config.go -@@ -0,0 +1,23 @@ -+package shardstreams -+ -+import ( -+ ""flag"" -+ -+ ""github.com/grafana/loki/pkg/util/flagext"" -+) -+ -+type Config struct { -+ Enabled bool `yaml:""enabled"" json:""enabled""` -+ LoggingEnabled bool `yaml:""logging_enabled"" json:""logging_enabled""` -+ -+ // DesiredRate is the threshold used to shard the stream into smaller pieces. -+ // Expected to be in bytes. -+ DesiredRate flagext.ByteSize `yaml:""desired_rate"" json:""desired_rate""` -+} -+ -+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, fs *flag.FlagSet) { -+ fs.BoolVar(&cfg.Enabled, prefix+"".enabled"", false, ""Automatically shard streams to keep them under the per-stream rate limit"") -+ fs.BoolVar(&cfg.LoggingEnabled, prefix+"".logging-enabled"", false, ""Enable logging when sharding streams"") -+ cfg.DesiredRate.Set(""3mb"") //nolint:errcheck -+ fs.Var(&cfg.DesiredRate, prefix+"".desired-rate"", ""threshold used to cut a new shard. 
Default (3MB) means if a rate is above 3MB, it will be sharded."") -+} -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index 85662cd86be5e..9609d6ad7d998 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -105,9 +105,6 @@ type Config struct { - IndexShards int `yaml:""index_shards""` - - MaxDroppedStreams int `yaml:""max_dropped_streams""` -- -- // Whether nor not to ingest all at once or not. Comes from distributor StreamShards Enabled -- RateLimitWholeStream bool `yaml:""-""` - } - - // RegisterFlags registers the flags. -diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -index 9518c50ab9c8d..41351c27146b3 100644 ---- a/pkg/ingester/instance.go -+++ b/pkg/ingester/instance.go -@@ -172,6 +172,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { - record := recordPool.GetRecord() - record.UserID = i.instanceID - defer recordPool.PutRecord(record) -+ rateLimitWholeStream := i.limiter.limits.ShardStreams(i.instanceID).Enabled - - var appendErr error - for _, reqStream := range req.Streams { -@@ -195,7 +196,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { - continue - } - -- _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false) -+ _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false, rateLimitWholeStream) - s.chunkMtx.Unlock() - } - -diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go -index c6d6362aa724b..dbe486b3debfd 100644 ---- a/pkg/ingester/instance_test.go -+++ b/pkg/ingester/instance_test.go -@@ -10,19 +10,20 @@ import ( - ""testing"" - ""time"" - -- ""github.com/grafana/loki/pkg/logql/syntax"" -- ""github.com/grafana/loki/pkg/querier/astmapper"" -- ""github.com/grafana/loki/pkg/storage/chunk"" -- ""github.com/grafana/loki/pkg/storage/config"" -- -+ ""github.com/grafana/dskit/flagext"" - ""github.com/pkg/errors"" - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/model/labels"" - ""github.com/stretchr/testify/require"" - -+ ""github.com/grafana/loki/pkg/distributor/shardstreams"" - ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/logql"" -+ ""github.com/grafana/loki/pkg/logql/syntax"" -+ ""github.com/grafana/loki/pkg/querier/astmapper"" - loki_runtime ""github.com/grafana/loki/pkg/runtime"" -+ ""github.com/grafana/loki/pkg/storage/chunk"" -+ ""github.com/grafana/loki/pkg/storage/config"" - ""github.com/grafana/loki/pkg/validation"" - ) - -@@ -646,6 +647,109 @@ func Test_QuerySampleWithDelete(t *testing.T) { - require.Equal(t, samples, []float64{1.}) - } - -+type fakeLimits struct { -+ limits map[string]*validation.Limits -+} -+ -+func (f fakeLimits) TenantLimits(userID string) *validation.Limits { -+ limits, ok := f.limits[userID] -+ if !ok { -+ return nil -+ } -+ -+ return limits -+} -+ -+func (f fakeLimits) AllByUserID() map[string]*validation.Limits { -+ return f.limits -+} -+ -+func TestStreamShardingUsage(t *testing.T) { -+ setupCustomTenantLimit := func(perStreamLimit string) *validation.Limits { -+ shardStreamsCfg := &shardstreams.Config{Enabled: true, LoggingEnabled: true} -+ shardStreamsCfg.DesiredRate.Set(""6MB"") //nolint:errcheck -+ -+ customTenantLimits := &validation.Limits{} -+ flagext.DefaultValues(customTenantLimits) -+ -+ customTenantLimits.PerStreamRateLimit.Set(perStreamLimit) //nolint:errcheck -+ customTenantLimits.PerStreamRateLimitBurst.Set(perStreamLimit) //nolint:errcheck -+ customTenantLimits.ShardStreams = shardStreamsCfg -+ -+ return 
customTenantLimits -+ } -+ -+ customTenant1 := ""my-org1"" -+ customTenant2 := ""my-org2"" -+ -+ limitsDefinition := &fakeLimits{ -+ limits: make(map[string]*validation.Limits), -+ } -+ // testing with 1 because although 1 is enough to accept at least the -+ // first line entry, because per-stream sharding is enabled, -+ // all entries are rejected if one of them isn't to be accepted. -+ limitsDefinition.limits[customTenant1] = setupCustomTenantLimit(""1"") -+ limitsDefinition.limits[customTenant2] = setupCustomTenantLimit(""4"") -+ -+ limits, err := validation.NewOverrides(defaultLimitsTestConfig(), limitsDefinition) -+ require.NoError(t, err) -+ -+ limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) -+ -+ defaultShardStreamsCfg := limiter.limits.ShardStreams(""fake"") -+ tenantShardStreamsCfg := limiter.limits.ShardStreams(customTenant1) -+ -+ t.Run(""test default configuration"", func(t *testing.T) { -+ require.Equal(t, false, defaultShardStreamsCfg.Enabled) -+ require.Equal(t, ""3MB"", defaultShardStreamsCfg.DesiredRate.String()) -+ require.Equal(t, false, defaultShardStreamsCfg.LoggingEnabled) -+ }) -+ -+ t.Run(""test configuration being applied"", func(t *testing.T) { -+ require.Equal(t, true, tenantShardStreamsCfg.Enabled) -+ require.Equal(t, ""6MB"", tenantShardStreamsCfg.DesiredRate.String()) -+ require.Equal(t, true, tenantShardStreamsCfg.LoggingEnabled) -+ }) -+ -+ t.Run(""invalid push returns error"", func(t *testing.T) { -+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil) -+ ctx := context.Background() -+ -+ err = i.Push(ctx, &logproto.PushRequest{ -+ Streams: []logproto.Stream{ -+ { -+ Labels: `{cpu=""10"",endpoint=""https"",instance=""10.253.57.87:9100"",job=""node-exporter"",mode=""idle"",namespace=""observability"",pod=""node-exporter-l454v"",service=""node-exporter""}`, -+ Entries: []logproto.Entry{ -+ {Timestamp: time.Now(), Line: ""1""}, -+ {Timestamp: time.Now(), Line: ""2""}, -+ {Timestamp: time.Now(), Line: ""3""}, -+ }, -+ }, -+ }, -+ }) -+ require.Error(t, err) -+ }) -+ -+ t.Run(""valid push returns no error"", func(t *testing.T) { -+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil) -+ ctx := context.Background() -+ -+ err = i.Push(ctx, &logproto.PushRequest{ -+ Streams: []logproto.Stream{ -+ { -+ Labels: `{myotherlabel=""myothervalue""}`, -+ Entries: []logproto.Entry{ -+ {Timestamp: time.Now(), Line: ""1""}, -+ {Timestamp: time.Now(), Line: ""2""}, -+ {Timestamp: time.Now(), Line: ""3""}, -+ }, -+ }, -+ }, -+ }) -+ require.NoError(t, err) -+ }) -+} -+ - func defaultInstance(t *testing.T) *instance { - ingesterConfig := defaultIngesterTestConfig(t) - defaultLimits := defaultLimitsTestConfig() -diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go -index c3bbc9a8af442..4baa2d875527c 100644 ---- a/pkg/ingester/recovery.go -+++ b/pkg/ingester/recovery.go -@@ -165,7 +165,7 @@ func (r *ingesterRecoverer) Push(userID string, entries RefEntries) error { - } - - // ignore out of order errors here (it's possible for a checkpoint to already have data from the wal segments) -- bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true) -+ bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true, false) - 
r.ing.replayController.Add(int64(bytesAdded)) - if err != nil && err == ErrEntriesExist { - r.ing.metrics.duplicateEntriesTotal.Add(float64(len(entries.Entries))) -diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go -index 616a490a50cce..40105a8f1b5ac 100644 ---- a/pkg/ingester/stream.go -+++ b/pkg/ingester/stream.go -@@ -150,6 +150,8 @@ func (s *stream) Push( - // Lock chunkMtx while pushing. - // If this is false, chunkMtx must be held outside Push. - lockChunk bool, -+ // Whether nor not to ingest all at once or not. It is a per-tenant configuration. -+ rateLimitWholeStream bool, - ) (int, error) { - if lockChunk { - s.chunkMtx.Lock() -@@ -168,8 +170,8 @@ func (s *stream) Push( - return 0, ErrEntriesExist - } - -- toStore, invalid := s.validateEntries(entries, isReplay) -- if s.cfg.RateLimitWholeStream && hasRateLimitErr(invalid) { -+ toStore, invalid := s.validateEntries(entries, isReplay, rateLimitWholeStream) -+ if rateLimitWholeStream && hasRateLimitErr(invalid) { - return 0, errorForFailedEntries(s, invalid, len(entries)) - } - -@@ -320,7 +322,7 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry) (in - return bytesAdded, storedEntries, invalid - } - --func (s *stream) validateEntries(entries []logproto.Entry, isReplay bool) ([]logproto.Entry, []entryWithError) { -+func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWholeStream bool) ([]logproto.Entry, []entryWithError) { - var ( - outOfOrderSamples, outOfOrderBytes int - rateLimitedSamples, rateLimitedBytes int -@@ -349,7 +351,7 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay bool) ([]log - totalBytes += lineBytes - - now := time.Now() -- if !s.cfg.RateLimitWholeStream && !s.limiter.AllowN(now, lineBytes) { -+ if !rateLimitWholeStream && !s.limiter.AllowN(now, len(entries[i].Line)) { - failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(lineBytes)}}) - rateLimitedSamples++ - rateLimitedBytes += lineBytes -@@ -380,7 +382,7 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay bool) ([]log - // ingestion, the limiter should only be advanced when the whole stream can be - // sent - now := time.Now() -- if s.cfg.RateLimitWholeStream && !s.limiter.AllowN(now, validBytes) { -+ if rateLimitWholeStream && !s.limiter.AllowN(now, totalBytes) { - // Report that the whole stream was rate limited - rateLimitedSamples = len(entries) - failedEntriesWithError = make([]entryWithError, 0, len(entries)) -diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go -index 0e6e1d0350c5b..5ec613fc46bd2 100644 ---- a/pkg/ingester/stream_test.go -+++ b/pkg/ingester/stream_test.go -@@ -66,7 +66,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) { - - _, err := s.Push(context.Background(), []logproto.Entry{ - {Timestamp: time.Unix(int64(numLogs), 0), Line: ""log""}, -- }, recordPool.GetRecord(), 0, true) -+ }, recordPool.GetRecord(), 0, true, false) - require.NoError(t, err) - - newLines := make([]logproto.Entry, numLogs) -@@ -86,7 +86,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) { - fmt.Fprintf(&expected, ""total ignored: %d out of %d"", numLogs, numLogs) - expectErr := httpgrpc.Errorf(http.StatusBadRequest, expected.String()) - -- _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true) -+ _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, 
true, false) - require.Error(t, err) - require.Equal(t, expectErr.Error(), err.Error()) - }) -@@ -114,7 +114,7 @@ func TestPushDeduplication(t *testing.T) { - {Timestamp: time.Unix(1, 0), Line: ""test""}, - {Timestamp: time.Unix(1, 0), Line: ""test""}, - {Timestamp: time.Unix(1, 0), Line: ""newer, better test""}, -- }, recordPool.GetRecord(), 0, true) -+ }, recordPool.GetRecord(), 0, true, false) - require.NoError(t, err) - require.Len(t, s.chunks, 1) - require.Equal(t, s.chunks[0].chunk.Size(), 2, -@@ -144,7 +144,7 @@ func TestPushRejectOldCounter(t *testing.T) { - {Timestamp: time.Unix(1, 0), Line: ""test""}, - {Timestamp: time.Unix(1, 0), Line: ""test""}, - {Timestamp: time.Unix(1, 0), Line: ""newer, better test""}, -- }, recordPool.GetRecord(), 0, true) -+ }, recordPool.GetRecord(), 0, true, false) - require.NoError(t, err) - require.Len(t, s.chunks, 1) - require.Equal(t, s.chunks[0].chunk.Size(), 2, -@@ -153,13 +153,13 @@ func TestPushRejectOldCounter(t *testing.T) { - // fail to push with a counter <= the streams internal counter - _, err = s.Push(context.Background(), []logproto.Entry{ - {Timestamp: time.Unix(1, 0), Line: ""test""}, -- }, recordPool.GetRecord(), 2, true) -+ }, recordPool.GetRecord(), 2, true, false) - require.Equal(t, ErrEntriesExist, err) - - // succeed with a greater counter - _, err = s.Push(context.Background(), []logproto.Entry{ - {Timestamp: time.Unix(1, 0), Line: ""test""}, -- }, recordPool.GetRecord(), 3, true) -+ }, recordPool.GetRecord(), 3, true, false) - require.Nil(t, err) - - } -@@ -273,7 +273,7 @@ func TestUnorderedPush(t *testing.T) { - if x.cutBefore { - _ = s.cutChunk(context.Background()) - } -- written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true) -+ written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true, false) - if x.err { - require.NotNil(t, err) - } else { -@@ -334,7 +334,8 @@ func TestPushRateLimit(t *testing.T) { - {Timestamp: time.Unix(1, 0), Line: ""aaaaaaaaab""}, - } - // Counter should be 2 now since the first line will be deduped. 
-- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true) -+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true) -+ require.Error(t, err) - require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error()) - } - -@@ -348,7 +349,6 @@ func TestPushRateLimitAllOrNothing(t *testing.T) { - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) - - cfg := defaultConfig() -- cfg.RateLimitWholeStream = true - - s := newStream( - cfg, -@@ -368,7 +368,8 @@ func TestPushRateLimitAllOrNothing(t *testing.T) { - } - - // Both entries have errors because rate limiting is done all at once -- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true) -+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true) -+ require.Error(t, err) - require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[0].Line))}).Error()) - require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error()) - } -@@ -400,7 +401,7 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) { - } - - // Push a first entry (it doesn't matter if we look like we're replaying or not) -- _, err = s.Push(context.Background(), entries, nil, 1, true) -+ _, err = s.Push(context.Background(), entries, nil, 1, true, false) - require.Nil(t, err) - - // Create a sample outside the validity window -@@ -409,11 +410,11 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) { - } - - // Pretend it's not a replay, ensure we error -- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true) -+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, false) - require.NotNil(t, err) - - // Now pretend it's a replay. The same write should succeed. 
-- _, err = s.Push(context.Background(), entries, nil, 2, true) -+ _, err = s.Push(context.Background(), entries, nil, 2, true, false) - require.Nil(t, err) - - } -@@ -455,7 +456,7 @@ func Benchmark_PushStream(b *testing.B) { - - for n := 0; n < b.N; n++ { - rec := recordPool.GetRecord() -- _, err := s.Push(ctx, e, rec, 0, true) -+ _, err := s.Push(ctx, e, rec, 0, true, false) - require.NoError(b, err) - recordPool.PutRecord(rec) - } -diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index 9e2ccf59dc23e..a65128965351a 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -438,7 +438,6 @@ func (t *Loki) initQuerier() (services.Service, error) { - - func (t *Loki) initIngester() (_ services.Service, err error) { - t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort -- t.Cfg.Ingester.RateLimitWholeStream = t.Cfg.Distributor.ShardStreams.Enabled - - t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.overrides, t.tenantConfigs, prometheus.DefaultRegisterer) - if err != nil { -diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go -index 5100fa5910d71..54eb1dbbd2a41 100644 ---- a/pkg/validation/limits.go -+++ b/pkg/validation/limits.go -@@ -10,9 +10,6 @@ import ( - ""github.com/go-kit/log/level"" - dskit_flagext ""github.com/grafana/dskit/flagext"" - -- ""github.com/grafana/loki/pkg/storage/stores/indexshipper/compactor/deletionmode"" -- util_log ""github.com/grafana/loki/pkg/util/log"" -- - ""github.com/pkg/errors"" - ""github.com/prometheus/common/model"" - ""github.com/prometheus/common/sigv4"" -@@ -21,9 +18,12 @@ import ( - ""golang.org/x/time/rate"" - ""gopkg.in/yaml.v2"" - -+ ""github.com/grafana/loki/pkg/distributor/shardstreams"" - ""github.com/grafana/loki/pkg/logql/syntax"" - ""github.com/grafana/loki/pkg/ruler/util"" -+ ""github.com/grafana/loki/pkg/storage/stores/indexshipper/compactor/deletionmode"" - ""github.com/grafana/loki/pkg/util/flagext"" -+ util_log ""github.com/grafana/loki/pkg/util/log"" - ) - - const ( -@@ -148,6 +148,8 @@ type Limits struct { - - // Deprecated - CompactorDeletionEnabled bool `yaml:""allow_deletes"" json:""allow_deletes""` -+ -+ ShardStreams *shardstreams.Config `yaml:""shard_streams"" json:""shard_streams""` - } - - type StreamRetention struct { -@@ -230,6 +232,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { - - // Deprecated - dskit_flagext.DeprecatedFlag(f, ""compactor.allow-deletes"", ""Deprecated. Instead, see compactor.deletion-mode which is another per tenant configuration"", util_log.Logger) -+ -+ l.ShardStreams = &shardstreams.Config{} -+ l.ShardStreams.RegisterFlagsWithPrefix(""shard-streams"", f) - } - - // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-@@ -608,6 +613,10 @@ func (o *Overrides) DeletionMode(userID string) string { - return o.getOverridesForUser(userID).DeletionMode - } - -+func (o *Overrides) ShardStreams(userID string) *shardstreams.Config { -+ return o.getOverridesForUser(userID).ShardStreams -+} -+ - func (o *Overrides) DefaultLimits() *Limits { - return o.defaultLimits - } -diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go -index 41d185946ffbb..72ad9222c5264 100644 ---- a/pkg/validation/limits_test.go -+++ b/pkg/validation/limits_test.go -@@ -72,6 +72,10 @@ ruler_remote_write_sigv4_config: - per_tenant_override_config: """" - per_tenant_override_period: 230s - query_timeout: 5m -+shard_streams: -+ enabled: true -+ desired_rate: 4mb -+ logging_enabled: true - ` - inputJSON := ` - { -@@ -108,7 +112,12 @@ query_timeout: 5m - }, - ""per_tenant_override_config"": """", - ""per_tenant_override_period"": ""230s"", -- ""query_timeout"": ""5m"" -+ ""query_timeout"": ""5m"", -+ ""shard_streams"": { -+ ""desired_rate"": ""4mb"", -+ ""enabled"": true, -+ ""logging_enabled"": true -+ } - } - `",Loki,"Per-tenant stream sharding (#7311) - -**What this PR does / why we need it**: -- Move stream sharding configuration to its own package to avoid cyclic -imports -- Change stream sharding to be a per-tenant configuration -- Change ingesters to reject whole streams due to rate-limit based on -per-tenant stream sharding -- Change stream sharding flags prefix from `distributor.shard-stream` to -`shard-stream`" -4455cd9d7d173896969d1d3589b2e9084af393c2,2023-11-21 20:54:17,Quentin Bisson,"[helm] Fix tracing configuration (#11186) - -**What this PR does / why we need it**: - -This PR allows user to enable tracing in the new SSD setup and fixes -incorrect documentation because it is currently impossible to enable -tracing in this chart (cf. -https://github.com/grafana/loki/blob/766f27645d2610a36eaaca8418482b740ae14215/cmd/loki/main.go#L81) - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added -- [x] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. -[Example -PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) - ---------- - -Signed-off-by: QuentinBisson ",False,"diff --git a/docs/sources/operations/troubleshooting.md b/docs/sources/operations/troubleshooting.md -index fd65e9a4d9a97..9fd4e4b8dcf38 100644 ---- a/docs/sources/operations/troubleshooting.md -+++ b/docs/sources/operations/troubleshooting.md -@@ -173,7 +173,11 @@ Jaeger is running. 
- If you deploy with Helm, use the following command: - - ```bash --$ helm upgrade --install loki loki/loki --set ""loki.tracing.jaegerAgentHost=YOUR_JAEGER_AGENT_HOST"" -+$ helm upgrade --install loki loki/loki --set ""loki.tracing.enabled=true"" -+ --set ""read.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""read.extraEnv[0].value="" -+ --set ""write.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""write.extraEnv[0].value="" -+ --set ""backend.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""backend.extraEnv[0].value="" -+ --set ""gateway.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""gateway.extraEnv[0].value="" - ``` - - ## Running Loki with Istio Sidecars -diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md -index 833cc2c77edc8..ede76840c8f6c 100644 ---- a/docs/sources/setup/install/helm/reference.md -+++ b/docs/sources/setup/install/helm/reference.md -@@ -2297,6 +2297,17 @@ null -
- []
- 
-+ -+ -+ -+ loki.tracing -+ object -+ Enable tracing -+
-+{
-+  ""enabled"": false
-+}
-+
- - - -@@ -4393,15 +4404,6 @@ null -
- ""1m""
- 
-- -- -- -- tracing.jaegerAgentHost -- string -- --
--""""
--
- - - -diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md -index 7f45b3155661c..51dd2deb2be54 100644 ---- a/production/helm/loki/CHANGELOG.md -+++ b/production/helm/loki/CHANGELOG.md -@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang - - [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) - -+## 5.37.0 -+ -+- [FEATURE] Add support for enabling tracing. -+ - ## 5.36.2 - - - [BUGFIX] Add support to run dnsmasq -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index 06768ba93d2d1..39e800d6193e0 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -3,7 +3,7 @@ name: loki - description: Helm chart for Grafana Loki in simple, scalable mode - type: application - appVersion: 2.9.2 --version: 5.36.3 -+version: 5.37.0 - home: https://grafana.github.io/helm-charts - sources: - - https://github.com/grafana/loki -diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md -index b5cd5883819aa..7fc83086785b3 100644 ---- a/production/helm/loki/README.md -+++ b/production/helm/loki/README.md -@@ -1,6 +1,6 @@ - # loki - --![Version: 5.36.3](https://img.shields.io/badge/Version-5.36.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) -+![Version: 5.37.0](https://img.shields.io/badge/Version-5.37.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) - - Helm chart for Grafana Loki in simple, scalable mode - -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index de6048aecc712..472882a226c8b 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -240,6 +240,9 @@ loki: - distributor: - {{- tpl (. | toYaml) $ | nindent 4 }} - {{- end }} -+ -+ tracing: -+ enabled: {{ .Values.loki.tracing.enabled }} - # Should authentication be enabled - auth_enabled: true - # -- memberlist configuration (overrides embedded default) -@@ -344,6 +347,9 @@ loki: - scheduler_address: '{{ include ""loki.querySchedulerAddress"" . }}' - # -- Optional distributor configuration - distributor: {} -+ # -- Enable tracing -+ tracing: -+ enabled: false - enterprise: - # Enable enterprise features, license must be provided - enabled: false -@@ -1474,8 +1480,6 @@ networkPolicy: - podSelector: {} - # -- Specifies the namespace the discovery Pods are running in - namespaceSelector: {} --tracing: -- jaegerAgentHost: """" - # ------------------------------------- - # Configuration for `minio` child chart - # -------------------------------------",unknown,"[helm] Fix tracing configuration (#11186) - -**What this PR does / why we need it**: - -This PR allows user to enable tracing in the new SSD setup and fixes -incorrect documentation because it is currently impossible to enable -tracing in this chart (cf. 
-https://github.com/grafana/loki/blob/766f27645d2610a36eaaca8418482b740ae14215/cmd/loki/main.go#L81) - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added -- [x] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. -[Example -PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) - ---------- - -Signed-off-by: QuentinBisson " -e65f26d30f9742d407fc6aa1e32dba3320952620,2022-04-07 19:17:50,Tat Chiu Leung,"storage: make Azure blobID chunk delimiter configurable (#5777) - -* Make Azure chunk delimiter configurable - -* Changelog: #5777 - -* doc update",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 406ae6051bee2..4b19e02113a68 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,5 +1,6 @@ - ## Main - * [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4. -+* [5777](https://github.com/grafana/loki/pull/5777) **tatchiuleung** storage: make Azure blobID chunk delimiter configurable. - * [5715](https://github.com/grafana/loki/pull/5715) **chaudum** Add option to push RFC5424 syslog messages from Promtail in syslog scrape target. - * [5696](https://github.com/grafana/loki/pull/5696) **paullryan** don't block scraping of new logs from cloudflare within promtail if an error is received from cloudflare about too early logs. - * [5685](https://github.com/grafana/loki/pull/5625) **chaudum** Fix bug in push request parser that allowed users to send arbitrary non-string data as ""log line"". -diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md -index ea08d3d6888b9..33612a53ca375 100644 ---- a/docs/sources/configuration/_index.md -+++ b/docs/sources/configuration/_index.md -@@ -743,6 +743,10 @@ The `azure_storage_config` configures Azure as a general storage for different d - # CLI flag: -.azure.account-key - [account_key: | default = """"] - -+# Chunk delimiter to build the blobID -+# CLI flag: -.azure.chunk-delimiter -+[chunk_delimiter: | default = ""-""] -+ - # Preallocated buffer size for downloads. 
- # CLI flag: -.azure.download-buffer-size - [download_buffer_size: | default = 512000] -diff --git a/pkg/storage/chunk/azure/blob_storage_client.go b/pkg/storage/chunk/azure/blob_storage_client.go -index ce5807103ec84..f475136c169c6 100644 ---- a/pkg/storage/chunk/azure/blob_storage_client.go -+++ b/pkg/storage/chunk/azure/blob_storage_client.go -@@ -85,6 +85,7 @@ type BlobStorageConfig struct { - Environment string `yaml:""environment""` - ContainerName string `yaml:""container_name""` - AccountName string `yaml:""account_name""` -+ ChunkDelimiter string `yaml:""chunk_delimiter""` - AccountKey flagext.Secret `yaml:""account_key""` - DownloadBufferSize int `yaml:""download_buffer_size""` - UploadBufferSize int `yaml:""upload_buffer_size""` -@@ -106,6 +107,7 @@ func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagS - f.StringVar(&c.Environment, prefix+""azure.environment"", azureGlobal, fmt.Sprintf(""Azure Cloud environment. Supported values are: %s."", strings.Join(supportedEnvironments, "", ""))) - f.StringVar(&c.ContainerName, prefix+""azure.container-name"", ""cortex"", ""Name of the blob container used to store chunks. This container must be created before running cortex."") - f.StringVar(&c.AccountName, prefix+""azure.account-name"", """", ""The Microsoft Azure account name to be used"") -+ f.StringVar(&c.ChunkDelimiter, prefix+""azure.chunk-delimiter"", ""-"", ""Chunk delimiter for blob ID to be used"") - f.Var(&c.AccountKey, prefix+""azure.account-key"", ""The Microsoft Azure account key to use."") - f.DurationVar(&c.RequestTimeout, prefix+""azure.request-timeout"", 30*time.Second, ""Timeout for requests made against azure blob storage."") - f.IntVar(&c.DownloadBufferSize, prefix+""azure.download-buffer-size"", 512000, ""Preallocated buffer size for downloads."") -@@ -251,7 +253,7 @@ func (b *BlobStorage) PutObject(ctx context.Context, objectKey string, object io - } - - func (b *BlobStorage) getBlobURL(blobID string, hedging bool) (azblob.BlockBlobURL, error) { -- blobID = strings.Replace(blobID, "":"", ""-"", -1) -+ blobID = strings.Replace(blobID, "":"", b.cfg.ChunkDelimiter, -1) - - // generate url for new chunk blob - u, err := url.Parse(fmt.Sprintf(b.selectBlobURLFmt(), b.cfg.AccountName, b.cfg.ContainerName, blobID))",storage,"make Azure blobID chunk delimiter configurable (#5777) - -* Make Azure chunk delimiter configurable - -* Changelog: #5777 - -* doc update" -9e19ff006ccf65e3bdba30d348325c0b41825ecd,2023-07-11 22:35:58,Trevor Whitney,"Add targetLabels to SeriesVolume requests (#9878) - -Adds optional `targetLabels` parameter to `series_volume` and -`series_volume_range` requests that controls how volumes are aggregated. 
-When provided, volumes are aggregated into the intersections of the -provided `targetLabels` only.",False,"diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go -index 01ac5dfd499b6..dc3a5408a952d 100644 ---- a/pkg/ingester/flush_test.go -+++ b/pkg/ingester/flush_test.go -@@ -352,7 +352,7 @@ func (s *testStore) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*la - return &stats.Stats{}, nil - } - --func (s *testStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (s *testStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { - return &logproto.VolumeResponse{}, nil - } - -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index 474ffe51ce535..3968090e816cb 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -173,7 +173,7 @@ type ChunkStore interface { - GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error) - GetSchemaConfigs() []config.PeriodConfig - Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*index_stats.Stats, error) -- SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) -+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) - } - - // Interface is an interface for the Ingester -@@ -1164,7 +1164,7 @@ func (i *Ingester) GetSeriesVolume(ctx context.Context, req *logproto.VolumeRequ - return instance.GetSeriesVolume(ctx, req) - }), - f(func() (*logproto.VolumeResponse, error) { -- return i.store.SeriesVolume(ctx, user, req.From, req.Through, req.Limit, matchers...) -+ return i.store.SeriesVolume(ctx, user, req.From, req.Through, req.Limit, req.TargetLabels, matchers...) 
- }), - } - resps := make([]*logproto.VolumeResponse, len(jobs)) -diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go -index 293885f3689cc..a9b84fb18cb7f 100644 ---- a/pkg/ingester/ingester_test.go -+++ b/pkg/ingester/ingester_test.go -@@ -470,7 +470,7 @@ func (s *mockStore) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*la - }, nil - } - --func (s *mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, limit int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (s *mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, limit int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { - return &logproto.VolumeResponse{ - Volumes: []logproto.Volume{ - {Name: `{foo=""bar""}`, Volume: 38}, -diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -index 1e7ce267ed5f6..8f637b626a891 100644 ---- a/pkg/ingester/instance.go -+++ b/pkg/ingester/instance.go -@@ -629,16 +629,8 @@ func (i *instance) GetSeriesVolume(ctx context.Context, req *logproto.VolumeRequ - return nil, err - } - -- matchAny := len(matchers) == 0 -- labelsToMatch := make(map[string]struct{}) -- for _, m := range matchers { -- if m.Name == """" { -- matchAny = true -- continue -- } -- -- labelsToMatch[m.Name] = struct{}{} -- } -+ labelsToMatch, matchers, matchAny := util.PrepareLabelsAndMatchers(req.TargetLabels, matchers) -+ matchAny = matchAny || len(matchers) == 0 - - seriesNames := make(map[uint64]string) - seriesLabels := labels.Labels(make([]labels.Label, 0, len(labelsToMatch))) -diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go -index ba74858520dad..c31f2949ceeee 100644 ---- a/pkg/ingester/instance_test.go -+++ b/pkg/ingester/instance_test.go -@@ -900,6 +900,57 @@ func TestInstance_SeriesVolume(t *testing.T) { - {Name: `{host=""agent"", job=""3"", log_stream=""dispatcher""}`, Volume: 90}, - }, volumes.Volumes) - }) -+ -+ t.Run(""with targetLabels"", func(t *testing.T) { -+ t.Run(""all targetLabels are added to matchers"", func(t *testing.T) { -+ instance := defaultInstance(t) -+ volumes, err := instance.GetSeriesVolume(context.Background(), &logproto.VolumeRequest{ -+ From: 0, -+ Through: 1.1 * 1e3, //milliseconds -+ Matchers: `{}`, -+ Limit: 2, -+ TargetLabels: []string{""log_stream""}, -+ }) -+ require.NoError(t, err) -+ -+ require.Equal(t, []logproto.Volume{ -+ {Name: `{log_stream=""dispatcher""}`, Volume: 90}, -+ {Name: `{log_stream=""worker""}`, Volume: 70}, -+ }, volumes.Volumes) -+ }) -+ -+ t.Run(""with a specific equals matcher"", func(t *testing.T) { -+ instance := defaultInstance(t) -+ volumes, err := instance.GetSeriesVolume(context.Background(), &logproto.VolumeRequest{ -+ From: 0, -+ Through: 1.1 * 1e3, //milliseconds -+ Matchers: `{log_stream=""dispatcher""}`, -+ Limit: 2, -+ TargetLabels: []string{""host""}, -+ }) -+ require.NoError(t, err) -+ -+ require.Equal(t, []logproto.Volume{ -+ {Name: `{host=""agent""}`, Volume: 90}, -+ }, volumes.Volumes) -+ }) -+ -+ t.Run(""with a specific regexp matcher"", func(t *testing.T) { -+ instance := defaultInstance(t) -+ volumes, err := instance.GetSeriesVolume(context.Background(), &logproto.VolumeRequest{ -+ From: 0, -+ Through: 1.1 * 1e3, //milliseconds -+ Matchers: `{log_stream=~"".+""}`, -+ Limit: 2, -+ TargetLabels: []string{""host"", ""job""}, -+ }) -+ require.NoError(t, err) -+ -+ require.Equal(t, []logproto.Volume{ -+ {Name: `{host=""agent"", job=""3""}`, Volume: 160}, -+ }, volumes.Volumes) -+ }) -+ }) - } - - func TestGetStats(t 
*testing.T) { -diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go -index 03bb05b9d60a0..0f6a49f1982bc 100644 ---- a/pkg/loghttp/query.go -+++ b/pkg/loghttp/query.go -@@ -4,6 +4,7 @@ import ( - ""errors"" - ""fmt"" - ""net/http"" -+ ""strings"" - ""time"" - ""unsafe"" - -@@ -348,11 +349,11 @@ func ParseIndexStatsQuery(r *http.Request) (*RangeQuery, error) { - } - - type SeriesVolumeInstantQuery struct { -- Start time.Time -- End time.Time -- Query string -- Ts time.Time -- Limit uint32 -+ Start time.Time -+ End time.Time -+ Query string -+ Limit uint32 -+ TargetLabels []string - } - - func ParseSeriesVolumeInstantQuery(r *http.Request) (*SeriesVolumeInstantQuery, error) { -@@ -367,9 +368,9 @@ func ParseSeriesVolumeInstantQuery(r *http.Request) (*SeriesVolumeInstantQuery, - } - - svInstantQuery := SeriesVolumeInstantQuery{ -- Query: result.Query, -- Ts: result.Ts, -- Limit: result.Limit, -+ Query: result.Query, -+ Limit: result.Limit, -+ TargetLabels: targetLabels(r), - } - - svInstantQuery.Start, svInstantQuery.End, err = bounds(r) -@@ -385,12 +386,12 @@ func ParseSeriesVolumeInstantQuery(r *http.Request) (*SeriesVolumeInstantQuery, - } - - type SeriesVolumeRangeQuery struct { -- Start time.Time -- End time.Time -- Step time.Duration -- Interval time.Duration -- Query string -- Limit uint32 -+ Start time.Time -+ End time.Time -+ Step time.Duration -+ Query string -+ Limit uint32 -+ TargetLabels []string - } - - func ParseSeriesVolumeRangeQuery(r *http.Request) (*SeriesVolumeRangeQuery, error) { -@@ -405,15 +406,24 @@ func ParseSeriesVolumeRangeQuery(r *http.Request) (*SeriesVolumeRangeQuery, erro - } - - return &SeriesVolumeRangeQuery{ -- Start: result.Start, -- End: result.End, -- Step: result.Step, -- Interval: result.Interval, -- Query: result.Query, -- Limit: result.Limit, -+ Start: result.Start, -+ End: result.End, -+ Step: result.Step, -+ Query: result.Query, -+ Limit: result.Limit, -+ TargetLabels: targetLabels(r), - }, nil - } - -+func targetLabels(r *http.Request) []string { -+ lbls := strings.Split(r.Form.Get(""targetLabels""), "","") -+ if (len(lbls) == 1 && lbls[0] == """") || len(lbls) == 0 { -+ return nil -+ } -+ -+ return lbls -+} -+ - func labelVolumeLimit(r *http.Request) error { - l, err := parseInt(r.Form.Get(""limit""), seriesvolume.DefaultLimit) - if err != nil { -diff --git a/pkg/loghttp/query_test.go b/pkg/loghttp/query_test.go -index eb841c16935d8..96192fcc5a79c 100644 ---- a/pkg/loghttp/query_test.go -+++ b/pkg/loghttp/query_test.go -@@ -268,3 +268,57 @@ func Test_QueryResponseUnmarshal(t *testing.T) { - }) - } - } -+ -+func Test_ParseSeriesVolumeInstantQuery(t *testing.T) { -+ req := &http.Request{ -+ URL: mustParseURL(`?query={foo=""bar""}` + -+ `&start=2017-06-10T21:42:24.760738998Z` + -+ `&end=2017-07-10T21:42:24.760738998Z` + -+ `&limit=1000` + -+ `&targetLabels=foo,bar`, -+ ), -+ } -+ -+ err := req.ParseForm() -+ require.NoError(t, err) -+ -+ actual, err := ParseSeriesVolumeInstantQuery(req) -+ require.NoError(t, err) -+ -+ expected := &SeriesVolumeInstantQuery{ -+ Start: time.Date(2017, 06, 10, 21, 42, 24, 760738998, time.UTC), -+ End: time.Date(2017, 07, 10, 21, 42, 24, 760738998, time.UTC), -+ Query: `{foo=""bar""}`, -+ Limit: 1000, -+ TargetLabels: []string{""foo"", ""bar""}, -+ } -+ require.Equal(t, expected, actual) -+} -+ -+func Test_ParseSeriesVolumeRangeQuery(t *testing.T) { -+ req := &http.Request{ -+ URL: mustParseURL(`?query={foo=""bar""}` + -+ `&start=2017-06-10T21:42:24.760738998Z` + -+ `&end=2017-07-10T21:42:24.760738998Z` + -+ 
`&limit=1000` + -+ `&step=3600` + -+ `&targetLabels=foo,bar`, -+ ), -+ } -+ -+ err := req.ParseForm() -+ require.NoError(t, err) -+ -+ actual, err := ParseSeriesVolumeRangeQuery(req) -+ require.NoError(t, err) -+ -+ expected := &SeriesVolumeRangeQuery{ -+ Start: time.Date(2017, 06, 10, 21, 42, 24, 760738998, time.UTC), -+ End: time.Date(2017, 07, 10, 21, 42, 24, 760738998, time.UTC), -+ Query: `{foo=""bar""}`, -+ Limit: 1000, -+ Step: time.Hour, -+ TargetLabels: []string{""foo"", ""bar""}, -+ } -+ require.Equal(t, expected, actual) -+} -diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go -index 4962a04f24d4a..5e5991f5ff0fe 100644 ---- a/pkg/logproto/logproto.pb.go -+++ b/pkg/logproto/logproto.pb.go -@@ -2286,11 +2286,12 @@ func (m *IndexStatsResponse) GetEntries() uint64 { - } - - type VolumeRequest struct { -- From github_com_prometheus_common_model.Time `protobuf:""varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time"" json:""from""` -- Through github_com_prometheus_common_model.Time `protobuf:""varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time"" json:""through""` -- Matchers string `protobuf:""bytes,3,opt,name=matchers,proto3"" json:""matchers,omitempty""` -- Limit int32 `protobuf:""varint,4,opt,name=limit,proto3"" json:""limit,omitempty""` -- Step int64 `protobuf:""varint,5,opt,name=step,proto3"" json:""step,omitempty""` -+ From github_com_prometheus_common_model.Time `protobuf:""varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time"" json:""from""` -+ Through github_com_prometheus_common_model.Time `protobuf:""varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time"" json:""through""` -+ Matchers string `protobuf:""bytes,3,opt,name=matchers,proto3"" json:""matchers,omitempty""` -+ Limit int32 `protobuf:""varint,4,opt,name=limit,proto3"" json:""limit,omitempty""` -+ Step int64 `protobuf:""varint,5,opt,name=step,proto3"" json:""step,omitempty""` -+ TargetLabels []string `protobuf:""bytes,6,rep,name=targetLabels,proto3"" json:""targetLabels,omitempty""` - } - - func (m *VolumeRequest) Reset() { *m = VolumeRequest{} } -@@ -2346,6 +2347,13 @@ func (m *VolumeRequest) GetStep() int64 { - return 0 - } - -+func (m *VolumeRequest) GetTargetLabels() []string { -+ if m != nil { -+ return m.TargetLabels -+ } -+ return nil -+} -+ - type VolumeResponse struct { - Volumes []Volume `protobuf:""bytes,1,rep,name=volumes,proto3"" json:""volumes""` - Limit int32 `protobuf:""varint,2,opt,name=limit,proto3"" json:""limit,omitempty""` -@@ -2501,146 +2509,147 @@ func init() { - func init() { proto.RegisterFile(""pkg/logproto/logproto.proto"", fileDescriptor_c28a5f14f1f4c79a) } - - var fileDescriptor_c28a5f14f1f4c79a = []byte{ -- // 2213 bytes of a gzipped FileDescriptorProto -+ // 2228 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x39, 0xcd, 0x6f, 0x1b, 0xc7, -- 0xf5, 0x1c, 0x72, 0x49, 0x91, 0x8f, 0xd4, 0x87, 0x47, 0x8c, 0xad, 0x1f, 0x6d, 0x93, 0xf2, 0x20, -- 0x3f, 0x5b, 0xb0, 0x1d, 0x32, 0x56, 0xda, 0xd4, 0xb1, 0x9b, 0x16, 0xa6, 0x14, 0x3b, 0xf2, 0x77, -- 0x46, 0xae, 0x5b, 0x04, 0x0d, 0x8c, 0x15, 0x39, 0xfc, 0x80, 0xb9, 0x5c, 0x7a, 0x77, 0x19, 0x47, -- 0x40, 0x0f, 0xfd, 0x07, 0x02, 0xe4, 0x56, 0xf4, 0x52, 0xf4, 0x50, 0xa0, 0x45, 0x81, 0x5e, 0xfa, -- 0x07, 0xb4, 0x3d, 0x14, 0xa8, 0x7b, 0x73, 0x6f, 0x41, 0x0f, 0x6c, 0x2d, 0xa3, 0x40, 0xa1, 0x53, -- 0xfe, 0x81, 0x16, 0xc5, 0x7c, 0xed, 0xce, 0xae, 0xa8, 0x24, 0x74, 
0x0d, 0x14, 0xbe, 0x88, 0xfb, -- 0xde, 0xbc, 0x79, 0xf3, 0xbe, 0xdf, 0xbc, 0x11, 0x1c, 0x1f, 0x3d, 0xec, 0x36, 0x06, 0x6e, 0x77, -- 0xe4, 0xb9, 0x81, 0x1b, 0x7e, 0xd4, 0xc5, 0x5f, 0x9c, 0xd7, 0x70, 0xa5, 0xdc, 0x75, 0xbb, 0xae, -- 0xa4, 0xe1, 0x5f, 0x72, 0xbd, 0x52, 0xeb, 0xba, 0x6e, 0x77, 0xc0, 0x1a, 0x02, 0xda, 0x19, 0x77, -- 0x1a, 0x41, 0xdf, 0x61, 0x7e, 0x60, 0x3b, 0x23, 0x45, 0xb0, 0xaa, 0xb8, 0x3f, 0x1a, 0x38, 0x6e, -- 0x9b, 0x0d, 0x1a, 0x7e, 0x60, 0x07, 0xbe, 0xfc, 0xab, 0x28, 0x96, 0x39, 0xc5, 0x68, 0xec, 0xf7, -- 0xc4, 0x1f, 0x89, 0x24, 0x65, 0xc0, 0xdb, 0x81, 0xc7, 0x6c, 0x87, 0xda, 0x01, 0xf3, 0x29, 0x7b, -- 0x34, 0x66, 0x7e, 0x40, 0x6e, 0xc1, 0x72, 0x0c, 0xeb, 0x8f, 0xdc, 0xa1, 0xcf, 0xf0, 0xdb, 0x50, -- 0xf4, 0x23, 0xf4, 0x0a, 0x5a, 0xcd, 0xac, 0x15, 0xd7, 0xcb, 0xf5, 0x50, 0x95, 0x68, 0x0f, 0x35, -- 0x09, 0xc9, 0xcf, 0x10, 0x40, 0xb4, 0x86, 0xab, 0x00, 0x72, 0xf5, 0x7d, 0xdb, 0xef, 0xad, 0xa0, -- 0x55, 0xb4, 0x66, 0x51, 0x03, 0x83, 0xcf, 0xc3, 0x91, 0x08, 0xba, 0xed, 0x6e, 0xf7, 0x6c, 0xaf, -- 0xbd, 0x92, 0x16, 0x64, 0x07, 0x17, 0x30, 0x06, 0xcb, 0xb3, 0x03, 0xb6, 0x92, 0x59, 0x45, 0x6b, -- 0x19, 0x2a, 0xbe, 0xf1, 0x51, 0xc8, 0x05, 0x6c, 0x68, 0x0f, 0x83, 0x15, 0x6b, 0x15, 0xad, 0x15, -- 0xa8, 0x82, 0x38, 0x9e, 0xeb, 0xce, 0xfc, 0x95, 0xec, 0x2a, 0x5a, 0x9b, 0xa7, 0x0a, 0x22, 0x7f, -- 0x4a, 0x43, 0xe9, 0x83, 0x31, 0xf3, 0x76, 0x95, 0x01, 0x70, 0x05, 0xf2, 0x3e, 0x1b, 0xb0, 0x56, -- 0xe0, 0x7a, 0x42, 0xc0, 0x02, 0x0d, 0x61, 0x5c, 0x86, 0xec, 0xa0, 0xef, 0xf4, 0x03, 0x21, 0xd2, -- 0x3c, 0x95, 0x00, 0xbe, 0x04, 0x59, 0x3f, 0xb0, 0xbd, 0x40, 0xc8, 0x51, 0x5c, 0xaf, 0xd4, 0xa5, -- 0xc3, 0xea, 0xda, 0x61, 0xf5, 0x7b, 0xda, 0x61, 0xcd, 0xfc, 0x93, 0x49, 0x2d, 0xf5, 0xd9, 0xdf, -- 0x6a, 0x88, 0xca, 0x2d, 0xf8, 0x6d, 0xc8, 0xb0, 0x61, 0x5b, 0xc8, 0xfa, 0x75, 0x77, 0xf2, 0x0d, -- 0xf8, 0x02, 0x14, 0xda, 0x7d, 0x8f, 0xb5, 0x82, 0xbe, 0x3b, 0x14, 0x1a, 0x2d, 0xac, 0x2f, 0x47, -- 0xde, 0xd8, 0xd4, 0x4b, 0x34, 0xa2, 0xc2, 0xe7, 0x21, 0xe7, 0x73, 0xb3, 0xf9, 0x2b, 0x73, 0xab, -- 0x99, 0xb5, 0x42, 0xb3, 0xbc, 0x3f, 0xa9, 0x2d, 0x49, 0xcc, 0x79, 0xd7, 0xe9, 0x07, 0xcc, 0x19, -- 0x05, 0xbb, 0x54, 0xd1, 0xe0, 0xb3, 0x30, 0xd7, 0x66, 0x03, 0xc6, 0x9d, 0x9d, 0x17, 0xce, 0x5e, -- 0x32, 0xd8, 0x8b, 0x05, 0xaa, 0x09, 0xae, 0x5b, 0xf9, 0xdc, 0xd2, 0x1c, 0xf9, 0x37, 0x02, 0xbc, -- 0x6d, 0x3b, 0xa3, 0x01, 0xfb, 0xda, 0xf6, 0x0c, 0x2d, 0x97, 0x7e, 0x61, 0xcb, 0x65, 0x66, 0xb5, -- 0x5c, 0x64, 0x06, 0x6b, 0x36, 0x33, 0x64, 0xbf, 0xc2, 0x0c, 0xe4, 0x26, 0xe4, 0x24, 0xea, 0xab, -- 0x62, 0x28, 0xd2, 0x39, 0xa3, 0xb5, 0x59, 0x8a, 0xb4, 0xc9, 0x08, 0x39, 0xc9, 0xcf, 0x11, 0xcc, -- 0x2b, 0x43, 0xaa, 0x1c, 0xdc, 0x81, 0x39, 0x99, 0x03, 0x3a, 0xff, 0x8e, 0x25, 0xf3, 0xef, 0x4a, -- 0xdb, 0x1e, 0x05, 0xcc, 0x6b, 0x36, 0x9e, 0x4c, 0x6a, 0xe8, 0xaf, 0x93, 0xda, 0x99, 0x6e, 0x3f, -- 0xe8, 0x8d, 0x77, 0xea, 0x2d, 0xd7, 0x69, 0x74, 0x3d, 0xbb, 0x63, 0x0f, 0xed, 0xc6, 0xc0, 0x7d, -- 0xd8, 0x6f, 0xe8, 0x7a, 0xa0, 0xf3, 0x56, 0x33, 0xc6, 0xe7, 0x84, 0x74, 0x81, 0xaf, 0x3c, 0xb2, -- 0x58, 0x97, 0x65, 0x64, 0x6b, 0xd8, 0x65, 0x3e, 0xe7, 0x6c, 0x71, 0x63, 0x52, 0x49, 0x43, 0x7e, -- 0x04, 0xcb, 0x31, 0x87, 0x2b, 0x39, 0x2f, 0x42, 0xce, 0x67, 0x5e, 0x3f, 0x2c, 0x13, 0x86, 0xc9, -- 0xb6, 0x05, 0xbe, 0xb9, 0xa0, 0xe4, 0xcb, 0x49, 0x98, 0x2a, 0xfa, 0xd9, 0x4e, 0xff, 0x23, 0x82, -- 0xd2, 0x4d, 0x7b, 0x87, 0x0d, 0x74, 0xa4, 0x61, 0xb0, 0x86, 0xb6, 0xc3, 0x94, 0xc5, 0xc5, 0x37, -- 0x4f, 0xfb, 0x8f, 0xed, 0xc1, 0x98, 0x49, 0x96, 0x79, 0xaa, 0xa0, 0x59, 0x73, 0x16, 0xbd, 0x70, -- 0xce, 0xa2, 0x28, 0xf2, 0xca, 0x90, 0x7d, 0xc4, 0x0d, 
0x25, 0xf2, 0xb5, 0x40, 0x25, 0x40, 0xce, -- 0xc0, 0xbc, 0xd2, 0x42, 0x99, 0x2f, 0x12, 0x99, 0x9b, 0xaf, 0xa0, 0x45, 0x26, 0x0e, 0xe4, 0xa4, -- 0xb5, 0xf1, 0xeb, 0x50, 0x08, 0x7b, 0x80, 0xd0, 0x36, 0xd3, 0xcc, 0xed, 0x4f, 0x6a, 0xe9, 0xc0, -- 0xa7, 0xd1, 0x02, 0xae, 0x41, 0x56, 0xec, 0x14, 0x9a, 0xa3, 0x66, 0x61, 0x7f, 0x52, 0x93, 0x08, -- 0x2a, 0x7f, 0xf0, 0x09, 0xb0, 0x7a, 0xbc, 0x0c, 0x73, 0x13, 0x58, 0xcd, 0xfc, 0xfe, 0xa4, 0x26, -- 0x60, 0x2a, 0xfe, 0x92, 0x6b, 0x50, 0xba, 0xc9, 0xba, 0x76, 0x6b, 0x57, 0x1d, 0x5a, 0xd6, 0xec, -- 0xf8, 0x81, 0x48, 0xf3, 0x38, 0x05, 0xa5, 0xf0, 0xc4, 0x07, 0x8e, 0xaf, 0x82, 0xba, 0x18, 0xe2, -- 0x6e, 0xf9, 0xe4, 0xa7, 0x08, 0x94, 0x9f, 0x31, 0x81, 0xdc, 0x80, 0xeb, 0xea, 0x4b, 0x1f, 0x35, -- 0x61, 0x7f, 0x52, 0x53, 0x18, 0xaa, 0x7e, 0xf1, 0x65, 0x98, 0xf3, 0xc5, 0x89, 0x9c, 0x59, 0x32, -- 0x7c, 0xc4, 0x42, 0x73, 0x91, 0x87, 0xc1, 0xfe, 0xa4, 0xa6, 0x09, 0xa9, 0xfe, 0xc0, 0xf5, 0x58, -- 0x7f, 0x91, 0x8a, 0x2d, 0xec, 0x4f, 0x6a, 0x06, 0xd6, 0xec, 0x37, 0xe4, 0x27, 0x08, 0x8a, 0xf7, -- 0xec, 0x7e, 0x18, 0x42, 0xa1, 0x8b, 0x90, 0xe1, 0x22, 0x9e, 0xce, 0x6d, 0x36, 0xb0, 0x77, 0xaf, -- 0xba, 0x9e, 0xe0, 0x39, 0x4f, 0x43, 0x38, 0x6a, 0x09, 0xd6, 0xd4, 0x96, 0x90, 0x9d, 0xb9, 0xb0, -- 0x5d, 0xb7, 0xf2, 0xe9, 0xa5, 0x0c, 0xf9, 0x0d, 0x82, 0x92, 0x94, 0x4c, 0x85, 0xc5, 0x0f, 0x21, -- 0x27, 0x05, 0x17, 0xb2, 0x7d, 0x49, 0xf2, 0x9f, 0x9b, 0x25, 0xf1, 0x15, 0x4f, 0xfc, 0x5d, 0x58, -- 0x68, 0x7b, 0xee, 0x68, 0xc4, 0xda, 0xdb, 0xaa, 0xc4, 0xa4, 0x93, 0x25, 0x66, 0xd3, 0x5c, 0xa7, -- 0x09, 0x72, 0xf2, 0x67, 0x04, 0xf3, 0x2a, 0x9b, 0x95, 0x2d, 0x43, 0x1b, 0xa0, 0x17, 0x2e, 0xee, -- 0xe9, 0x59, 0x8b, 0xfb, 0x51, 0xc8, 0x75, 0x3d, 0x77, 0x3c, 0xf2, 0x57, 0x32, 0x32, 0x77, 0x24, -- 0x34, 0x5b, 0xd1, 0x27, 0xd7, 0x61, 0x41, 0xab, 0x72, 0x48, 0x49, 0xab, 0x24, 0x4b, 0xda, 0x56, -- 0x9b, 0x0d, 0x83, 0x7e, 0xa7, 0x1f, 0x16, 0x29, 0x45, 0x4f, 0x3e, 0x45, 0xb0, 0x94, 0x24, 0xc1, -- 0xdf, 0x31, 0xf2, 0x80, 0xb3, 0x3b, 0x7d, 0x38, 0xbb, 0xba, 0x28, 0x0e, 0xfe, 0x7b, 0xc3, 0xc0, -- 0xdb, 0xd5, 0x39, 0x52, 0x79, 0x07, 0x8a, 0x06, 0x9a, 0x37, 0x8f, 0x87, 0x4c, 0xc7, 0x2c, 0xff, -- 0x8c, 0x92, 0x35, 0x2d, 0xe3, 0x58, 0x00, 0x97, 0xd2, 0x17, 0x11, 0x8f, 0xf8, 0xf9, 0x98, 0x27, -- 0xf1, 0x45, 0xb0, 0x3a, 0x9e, 0xeb, 0xcc, 0xe4, 0x26, 0xb1, 0x03, 0x7f, 0x03, 0xd2, 0x81, 0x3b, -- 0x93, 0x93, 0xd2, 0x81, 0xcb, 0x7d, 0xa4, 0x94, 0xcf, 0xc8, 0x1b, 0x9a, 0x84, 0xc8, 0xaf, 0x11, -- 0x2c, 0xf2, 0x3d, 0xd2, 0x02, 0x1b, 0xbd, 0xf1, 0xf0, 0x21, 0x5e, 0x83, 0x25, 0x7e, 0xd2, 0x83, -- 0xbe, 0xea, 0x00, 0x0f, 0xfa, 0x6d, 0xa5, 0xe6, 0x02, 0xc7, 0xeb, 0xc6, 0xb0, 0xd5, 0xc6, 0xc7, -- 0x60, 0x6e, 0xec, 0x4b, 0x02, 0xa9, 0x73, 0x8e, 0x83, 0x5b, 0x6d, 0x7c, 0xce, 0x38, 0x8e, 0xdb, -- 0xda, 0xb8, 0x26, 0x09, 0x1b, 0xde, 0xb5, 0xfb, 0x5e, 0x58, 0x7c, 0xce, 0x40, 0xae, 0xc5, 0x0f, -- 0x96, 0x71, 0xc2, 0x3b, 0x50, 0x48, 0x2c, 0x04, 0xa2, 0x6a, 0x99, 0x7c, 0x13, 0x0a, 0xe1, 0xee, -- 0xa9, 0x8d, 0x67, 0xaa, 0x07, 0xc8, 0x65, 0x58, 0x94, 0x45, 0x75, 0xfa, 0xe6, 0xd2, 0xb4, 0xcd, -- 0x25, 0xbd, 0xf9, 0x38, 0x64, 0xa5, 0x55, 0x30, 0x58, 0x6d, 0x3b, 0xb0, 0xf5, 0x16, 0xfe, 0x4d, -- 0x56, 0xe0, 0xe8, 0x3d, 0xcf, 0x1e, 0xfa, 0x1d, 0xe6, 0x09, 0xa2, 0x30, 0x76, 0xc9, 0x6b, 0xb0, -- 0xcc, 0x0b, 0x09, 0xf3, 0xfc, 0x0d, 0x77, 0x3c, 0x0c, 0xf4, 0x45, 0xff, 0x3c, 0x94, 0xe3, 0x68, -- 0x15, 0xea, 0x65, 0xc8, 0xb6, 0x38, 0x42, 0x70, 0x9f, 0xa7, 0x12, 0x20, 0xbf, 0x40, 0x80, 0xaf, -- 0xb1, 0x40, 0xb0, 0xde, 0xda, 0xf4, 0x8d, 0xcb, 0x9d, 0x63, 0x07, 0xad, 0x1e, 0xf3, 0x7c, 0x7d, -- 0xd1, 0xd1, 0xf0, 0xff, 0xe2, 0x72, 0x47, 
0x2e, 0xc0, 0x72, 0x4c, 0x4a, 0xa5, 0x53, 0x05, 0xf2, -- 0x2d, 0x85, 0x53, 0x4d, 0x35, 0x84, 0xc9, 0x6f, 0xd3, 0x90, 0x97, 0xbe, 0x65, 0x1d, 0x7c, 0x01, -- 0x8a, 0x1d, 0x1e, 0x6b, 0xde, 0xc8, 0xeb, 0x2b, 0x13, 0x58, 0xcd, 0xc5, 0xfd, 0x49, 0xcd, 0x44, -- 0x53, 0x13, 0xc0, 0x6f, 0x24, 0x02, 0xaf, 0x59, 0xde, 0x9b, 0xd4, 0x72, 0xdf, 0xe3, 0xc1, 0xb7, -- 0xc9, 0xdb, 0x9b, 0x08, 0xc3, 0xcd, 0x30, 0x1c, 0x6f, 0xa8, 0x6c, 0x13, 0x37, 0xbd, 0xe6, 0xb7, -- 0xb8, 0xf8, 0x89, 0x7a, 0x3d, 0xf2, 0x5c, 0x87, 0x05, 0x3d, 0x36, 0xf6, 0x1b, 0x2d, 0xd7, 0x71, -- 0xdc, 0x61, 0x43, 0x8c, 0x75, 0x42, 0x69, 0xde, 0xa3, 0xf9, 0x76, 0x95, 0x80, 0xf7, 0x60, 0x2e, -- 0xe8, 0x79, 0xee, 0xb8, 0xdb, 0x13, 0xed, 0x27, 0xd3, 0xbc, 0x34, 0x3b, 0x3f, 0xcd, 0x81, 0xea, -- 0x0f, 0x7c, 0x8a, 0x5b, 0x8b, 0xb5, 0x1e, 0xfa, 0x63, 0x47, 0x0e, 0x4b, 0xcd, 0xec, 0xfe, 0xa4, -- 0x86, 0xde, 0xa0, 0x21, 0x9a, 0x7c, 0x9a, 0x86, 0x9a, 0x08, 0xe1, 0xfb, 0xe2, 0x6e, 0x72, 0xd5, -- 0xf5, 0x6e, 0xb1, 0xc0, 0xeb, 0xb7, 0x6e, 0xdb, 0x0e, 0xd3, 0xb1, 0x51, 0x83, 0xa2, 0x23, 0x90, -- 0x0f, 0x8c, 0xe4, 0x00, 0x27, 0xa4, 0xc3, 0x27, 0x01, 0x44, 0xda, 0xc9, 0x75, 0x99, 0x27, 0x05, -- 0x81, 0x11, 0xcb, 0x1b, 0x31, 0x4b, 0x35, 0x66, 0xd4, 0x4c, 0x59, 0x68, 0x2b, 0x69, 0xa1, 0x99, -- 0xf9, 0x84, 0x66, 0x31, 0x63, 0x3d, 0x1b, 0x8f, 0x75, 0xf2, 0x17, 0x04, 0xd5, 0x9b, 0x5a, 0xf2, -- 0x17, 0x34, 0x87, 0xd6, 0x37, 0xfd, 0x92, 0xf4, 0xcd, 0xfc, 0x77, 0xfa, 0x92, 0x3f, 0x18, 0x29, -- 0x4f, 0x59, 0x47, 0xeb, 0xb1, 0x61, 0xb4, 0x8b, 0x97, 0x21, 0x66, 0xfa, 0x25, 0xba, 0x25, 0x93, -- 0x70, 0xcb, 0xbb, 0x51, 0x39, 0x10, 0x1a, 0xa8, 0x72, 0x70, 0x1a, 0x2c, 0x8f, 0x75, 0x74, 0xf3, -- 0xc5, 0xc9, 0x1a, 0xcf, 0x3a, 0x54, 0xac, 0x93, 0xdf, 0x21, 0x58, 0xba, 0xc6, 0x82, 0xf8, 0xb5, -- 0xe6, 0x55, 0xd2, 0xff, 0x7d, 0x38, 0x62, 0xc8, 0xaf, 0xb4, 0x7f, 0x2b, 0x71, 0x97, 0x79, 0x2d, -- 0xd2, 0x7f, 0x6b, 0xd8, 0x66, 0x9f, 0xa8, 0x19, 0x2d, 0x7e, 0x8d, 0xb9, 0x0b, 0x45, 0x63, 0x11, -- 0x5f, 0x49, 0x5c, 0x60, 0xa6, 0x35, 0xd5, 0x66, 0x59, 0xe9, 0x24, 0xa7, 0x34, 0x75, 0x3d, 0x0d, -- 0xdb, 0xfd, 0x36, 0x60, 0x31, 0x36, 0x0a, 0xb6, 0x66, 0xa5, 0x16, 0xd8, 0x1b, 0xe1, 0x7d, 0x26, -- 0x84, 0xf1, 0x29, 0xb0, 0x3c, 0xf7, 0xb1, 0xbe, 0x99, 0xce, 0x47, 0x47, 0x52, 0xf7, 0x31, 0x15, -- 0x4b, 0xe4, 0x32, 0x64, 0xa8, 0xfb, 0x18, 0x57, 0x01, 0x3c, 0x7b, 0xd8, 0x65, 0xf7, 0xc3, 0x81, -- 0xa5, 0x44, 0x0d, 0xcc, 0x21, 0xfd, 0x75, 0x03, 0x8e, 0x98, 0x12, 0x49, 0x77, 0xd7, 0x61, 0x8e, -- 0x23, 0xfb, 0xd3, 0x1e, 0xbd, 0x04, 0xa1, 0x9c, 0x7d, 0x35, 0x11, 0x8f, 0x19, 0x88, 0xf0, 0xf8, -- 0x04, 0x14, 0x02, 0x7b, 0x67, 0xc0, 0x6e, 0x47, 0x39, 0x1f, 0x21, 0xf8, 0x2a, 0x9f, 0xb5, 0xee, -- 0x1b, 0x17, 0x85, 0x08, 0x81, 0xcf, 0xc2, 0x52, 0x24, 0xf3, 0x5d, 0x8f, 0x75, 0xfa, 0x9f, 0x08, -- 0x0f, 0x97, 0xe8, 0x01, 0x3c, 0x5e, 0x83, 0xc5, 0x08, 0xb7, 0x2d, 0xda, 0xae, 0x25, 0x48, 0x93, -- 0x68, 0x6e, 0x1b, 0xa1, 0xee, 0x7b, 0x8f, 0xc6, 0xf6, 0x40, 0x14, 0xb2, 0x12, 0x35, 0x30, 0xe4, -- 0xf7, 0x08, 0x8e, 0x48, 0x57, 0xf3, 0x29, 0xfb, 0x55, 0x8c, 0xfa, 0x5f, 0x22, 0xc0, 0xa6, 0x06, -- 0x2a, 0xb4, 0xfe, 0xdf, 0x7c, 0x3e, 0xe1, 0x7d, 0xbd, 0x28, 0x46, 0x48, 0x89, 0x8a, 0x5e, 0x40, -- 0x48, 0x78, 0x05, 0x14, 0xef, 0x8e, 0x72, 0x46, 0x95, 0x18, 0x7d, 0xfb, 0xe3, 0xa3, 0xf5, 0xce, -- 0x6e, 0xc0, 0x7c, 0x35, 0x61, 0x8a, 0xd1, 0x5a, 0x20, 0xa8, 0xfc, 0xe1, 0x67, 0xb1, 0x61, 0x20, -- 0xa2, 0xc6, 0x8a, 0xce, 0x52, 0x28, 0xaa, 0x3f, 0xc8, 0x3f, 0x10, 0xcc, 0xdf, 0x77, 0x07, 0xe3, -- 0xa8, 0x4b, 0xbc, 0x42, 0x76, 0x8e, 0x8f, 0xbe, 0x59, 0x3d, 0xfa, 0x62, 0xb0, 0xfc, 0x80, 0x8d, -- 0x44, 0x64, 0x65, 0xa8, 0xf8, 
0x26, 0x3f, 0x80, 0x05, 0xad, 0xa6, 0x72, 0xc6, 0x9b, 0x30, 0xf7, -- 0xb1, 0xc0, 0x4c, 0x79, 0x24, 0x92, 0xa4, 0xaa, 0x00, 0x69, 0xb2, 0xf8, 0xdb, 0xab, 0x3e, 0x8d, -- 0x5c, 0x87, 0x9c, 0x24, 0xc7, 0x27, 0xcc, 0x4b, 0xb8, 0x7c, 0xcd, 0xe0, 0xb0, 0xba, 0x51, 0x13, -- 0xc8, 0x49, 0x46, 0xca, 0x65, 0xc2, 0xab, 0x12, 0x43, 0xd5, 0xef, 0xd9, 0xd3, 0x50, 0x08, 0x1f, -- 0x4e, 0x71, 0x11, 0xe6, 0xae, 0xde, 0xa1, 0xdf, 0xbf, 0x42, 0x37, 0x97, 0x52, 0xb8, 0x04, 0xf9, -- 0xe6, 0x95, 0x8d, 0x1b, 0x02, 0x42, 0xeb, 0xff, 0xb2, 0x74, 0x4d, 0xf0, 0xf0, 0xb7, 0x21, 0x2b, -- 0x13, 0xfd, 0x68, 0x24, 0xbf, 0xf9, 0xfc, 0x59, 0x39, 0x76, 0x00, 0xaf, 0xae, 0xe5, 0xa9, 0x37, -- 0x11, 0xbe, 0x0d, 0x45, 0x81, 0x54, 0x4f, 0x2c, 0x27, 0x92, 0x2f, 0x1d, 0x31, 0x4e, 0x27, 0x0f, -- 0x59, 0x35, 0xf8, 0x5d, 0x82, 0xac, 0xa8, 0xb5, 0xa6, 0x34, 0xe6, 0x13, 0x99, 0x29, 0x4d, 0xec, -- 0xd1, 0x89, 0xa4, 0xf0, 0x3b, 0x60, 0xf1, 0x79, 0x00, 0x1b, 0xed, 0xc0, 0x78, 0x19, 0xa9, 0x1c, -- 0x4d, 0xa2, 0x8d, 0x63, 0xdf, 0x0d, 0x1f, 0x78, 0x8e, 0x25, 0x07, 0x59, 0xbd, 0x7d, 0xe5, 0xe0, -- 0x42, 0x78, 0xf2, 0x1d, 0xf9, 0xd2, 0xa1, 0x27, 0x11, 0x7c, 0x32, 0x7e, 0x54, 0x62, 0x70, 0xa9, -- 0x54, 0x0f, 0x5b, 0x0e, 0x19, 0xde, 0x84, 0xa2, 0x31, 0x05, 0x98, 0x66, 0x3d, 0x38, 0xc2, 0x98, -- 0x66, 0x9d, 0x32, 0x3a, 0x90, 0x14, 0xbe, 0x06, 0x79, 0xde, 0x44, 0x79, 0x2d, 0xc1, 0xc7, 0x93, -- 0xbd, 0xd2, 0xa8, 0x91, 0x95, 0x13, 0xd3, 0x17, 0x43, 0x46, 0x57, 0x61, 0x31, 0xec, 0xc6, 0x2a, -- 0x68, 0x8f, 0x25, 0xa3, 0x7e, 0x8a, 0xbd, 0xe2, 0x99, 0x43, 0x52, 0xeb, 0x1f, 0x41, 0x5e, 0x0f, -- 0xbe, 0xf8, 0x03, 0x58, 0x88, 0x8f, 0x7d, 0xf8, 0xff, 0x0c, 0xf3, 0xc4, 0xa7, 0xe9, 0xca, 0xaa, -- 0xb1, 0x34, 0x7d, 0x56, 0x4c, 0xad, 0xa1, 0xf5, 0x8f, 0xf4, 0x7f, 0x6c, 0x36, 0xed, 0xc0, 0xc6, -- 0x77, 0x60, 0x41, 0x68, 0x1f, 0xfe, 0x4b, 0x27, 0x16, 0xa5, 0x07, 0xfe, 0x7f, 0x14, 0x8b, 0xd2, -- 0x83, 0xff, 0x47, 0x22, 0xa9, 0xe6, 0x87, 0x4f, 0x9f, 0x55, 0x53, 0x9f, 0x3f, 0xab, 0xa6, 0xbe, -- 0x78, 0x56, 0x45, 0x3f, 0xde, 0xab, 0xa2, 0x5f, 0xed, 0x55, 0xd1, 0x93, 0xbd, 0x2a, 0x7a, 0xba, -- 0x57, 0x45, 0x7f, 0xdf, 0xab, 0xa2, 0x7f, 0xee, 0x55, 0x53, 0x5f, 0xec, 0x55, 0xd1, 0x67, 0xcf, -- 0xab, 0xa9, 0xa7, 0xcf, 0xab, 0xa9, 0xcf, 0x9f, 0x57, 0x53, 0x1f, 0xbe, 0xfe, 0x65, 0x0f, 0x5a, -- 0xfa, 0xc4, 0x9d, 0x9c, 0xf8, 0x79, 0xeb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x23, 0x5c, -- 0x1e, 0x70, 0x1b, 0x00, 0x00, -+ 0xf5, 0x1c, 0x72, 0x49, 0x91, 0x8f, 0xd4, 0x87, 0x47, 0x8c, 0xcd, 0x1f, 0xe3, 0x90, 0xf2, 0x20, -+ 0x3f, 0x5b, 0xb0, 0x1d, 0x31, 0x56, 0xda, 0xd4, 0xb1, 0x9b, 0x16, 0xa6, 0x14, 0x3b, 0xf2, 0x77, -+ 0x46, 0xae, 0x5b, 0x04, 0x0d, 0x8c, 0x15, 0x39, 0xa4, 0x08, 0x73, 0xb9, 0xf4, 0xee, 0x30, 0x8e, -+ 0x80, 0x1e, 0x7a, 0xed, 0x21, 0x40, 0x6e, 0x45, 0x2f, 0x45, 0x0f, 0x05, 0x5a, 0x14, 0xe8, 0xa5, -+ 0x7f, 0x40, 0xdb, 0x43, 0x81, 0xba, 0x37, 0xf7, 0x16, 0xf4, 0xc0, 0xd6, 0xf2, 0xa5, 0xd0, 0x29, -+ 0xff, 0x40, 0x8b, 0x62, 0xbe, 0x76, 0x67, 0x29, 0x3a, 0x09, 0x5d, 0x03, 0x85, 0x2f, 0xe2, 0xbe, -+ 0x37, 0x6f, 0xde, 0xbc, 0xef, 0x37, 0x6f, 0x04, 0xaf, 0x0e, 0xef, 0x77, 0x1b, 0x7d, 0xbf, 0x3b, -+ 0x0c, 0x7c, 0xee, 0x47, 0x1f, 0x6b, 0xf2, 0x2f, 0xce, 0x1b, 0xb8, 0x5a, 0xee, 0xfa, 0x5d, 0x5f, -+ 0xd1, 0x88, 0x2f, 0xb5, 0x5e, 0xad, 0x77, 0x7d, 0xbf, 0xdb, 0x67, 0x0d, 0x09, 0xed, 0x8c, 0x3a, -+ 0x0d, 0xde, 0xf3, 0x58, 0xc8, 0x5d, 0x6f, 0xa8, 0x09, 0x56, 0x34, 0xf7, 0x07, 0x7d, 0xcf, 0x6f, -+ 0xb3, 0x7e, 0x23, 0xe4, 0x2e, 0x0f, 0xd5, 0x5f, 0x4d, 0xb1, 0x2c, 0x28, 0x86, 0xa3, 0x70, 0x57, -+ 0xfe, 0x51, 0x48, 0x52, 0x06, 0xbc, 0xcd, 0x03, 0xe6, 0x7a, 0xd4, 0xe5, 0x2c, 0xa4, 
0xec, 0xc1, -+ 0x88, 0x85, 0x9c, 0xdc, 0x80, 0xe5, 0x04, 0x36, 0x1c, 0xfa, 0x83, 0x90, 0xe1, 0xb7, 0xa1, 0x18, -+ 0xc6, 0xe8, 0x0a, 0x5a, 0xc9, 0xac, 0x16, 0xd7, 0xcb, 0x6b, 0x91, 0x2a, 0xf1, 0x1e, 0x6a, 0x13, -+ 0x92, 0x9f, 0x23, 0x80, 0x78, 0x0d, 0xd7, 0x00, 0xd4, 0xea, 0xfb, 0x6e, 0xb8, 0x5b, 0x41, 0x2b, -+ 0x68, 0xd5, 0xa1, 0x16, 0x06, 0x9f, 0x85, 0x23, 0x31, 0x74, 0xd3, 0xdf, 0xde, 0x75, 0x83, 0x76, -+ 0x25, 0x2d, 0xc9, 0x0e, 0x2f, 0x60, 0x0c, 0x4e, 0xe0, 0x72, 0x56, 0xc9, 0xac, 0xa0, 0xd5, 0x0c, -+ 0x95, 0xdf, 0xf8, 0x28, 0xe4, 0x38, 0x1b, 0xb8, 0x03, 0x5e, 0x71, 0x56, 0xd0, 0x6a, 0x81, 0x6a, -+ 0x48, 0xe0, 0x85, 0xee, 0x2c, 0xac, 0x64, 0x57, 0xd0, 0xea, 0x3c, 0xd5, 0x10, 0xf9, 0x73, 0x1a, -+ 0x4a, 0x1f, 0x8c, 0x58, 0xb0, 0xa7, 0x0d, 0x80, 0xab, 0x90, 0x0f, 0x59, 0x9f, 0xb5, 0xb8, 0x1f, -+ 0x48, 0x01, 0x0b, 0x34, 0x82, 0x71, 0x19, 0xb2, 0xfd, 0x9e, 0xd7, 0xe3, 0x52, 0xa4, 0x79, 0xaa, -+ 0x00, 0x7c, 0x01, 0xb2, 0x21, 0x77, 0x03, 0x2e, 0xe5, 0x28, 0xae, 0x57, 0xd7, 0x94, 0xc3, 0xd6, -+ 0x8c, 0xc3, 0xd6, 0xee, 0x18, 0x87, 0x35, 0xf3, 0x8f, 0xc6, 0xf5, 0xd4, 0x67, 0x7f, 0xaf, 0x23, -+ 0xaa, 0xb6, 0xe0, 0xb7, 0x21, 0xc3, 0x06, 0x6d, 0x29, 0xeb, 0xd7, 0xdd, 0x29, 0x36, 0xe0, 0x73, -+ 0x50, 0x68, 0xf7, 0x02, 0xd6, 0xe2, 0x3d, 0x7f, 0x20, 0x35, 0x5a, 0x58, 0x5f, 0x8e, 0xbd, 0xb1, -+ 0x69, 0x96, 0x68, 0x4c, 0x85, 0xcf, 0x42, 0x2e, 0x14, 0x66, 0x0b, 0x2b, 0x73, 0x2b, 0x99, 0xd5, -+ 0x42, 0xb3, 0x7c, 0x30, 0xae, 0x2f, 0x29, 0xcc, 0x59, 0xdf, 0xeb, 0x71, 0xe6, 0x0d, 0xf9, 0x1e, -+ 0xd5, 0x34, 0xf8, 0x34, 0xcc, 0xb5, 0x59, 0x9f, 0x09, 0x67, 0xe7, 0xa5, 0xb3, 0x97, 0x2c, 0xf6, -+ 0x72, 0x81, 0x1a, 0x82, 0xab, 0x4e, 0x3e, 0xb7, 0x34, 0x47, 0xfe, 0x8d, 0x00, 0x6f, 0xbb, 0xde, -+ 0xb0, 0xcf, 0xbe, 0xb6, 0x3d, 0x23, 0xcb, 0xa5, 0x9f, 0xdb, 0x72, 0x99, 0x59, 0x2d, 0x17, 0x9b, -+ 0xc1, 0x99, 0xcd, 0x0c, 0xd9, 0xaf, 0x30, 0x03, 0xb9, 0x0e, 0x39, 0x85, 0xfa, 0xaa, 0x18, 0x8a, -+ 0x75, 0xce, 0x18, 0x6d, 0x96, 0x62, 0x6d, 0x32, 0x52, 0x4e, 0xf2, 0x0b, 0x04, 0xf3, 0xda, 0x90, -+ 0x3a, 0x07, 0x77, 0x60, 0x4e, 0xe5, 0x80, 0xc9, 0xbf, 0x63, 0x93, 0xf9, 0x77, 0xa9, 0xed, 0x0e, -+ 0x39, 0x0b, 0x9a, 0x8d, 0x47, 0xe3, 0x3a, 0xfa, 0xdb, 0xb8, 0x7e, 0xaa, 0xdb, 0xe3, 0xbb, 0xa3, -+ 0x9d, 0xb5, 0x96, 0xef, 0x35, 0xba, 0x81, 0xdb, 0x71, 0x07, 0x6e, 0xa3, 0xef, 0xdf, 0xef, 0x35, -+ 0x4c, 0x3d, 0x30, 0x79, 0x6b, 0x18, 0xe3, 0x33, 0x52, 0x3a, 0x1e, 0x6a, 0x8f, 0x2c, 0xae, 0xa9, -+ 0x32, 0xb2, 0x35, 0xe8, 0xb2, 0x50, 0x70, 0x76, 0x84, 0x31, 0xa9, 0xa2, 0x21, 0x3f, 0x82, 0xe5, -+ 0x84, 0xc3, 0xb5, 0x9c, 0xe7, 0x21, 0x17, 0xb2, 0xa0, 0x17, 0x95, 0x09, 0xcb, 0x64, 0xdb, 0x12, -+ 0xdf, 0x5c, 0xd0, 0xf2, 0xe5, 0x14, 0x4c, 0x35, 0xfd, 0x6c, 0xa7, 0xff, 0x09, 0x41, 0xe9, 0xba, -+ 0xbb, 0xc3, 0xfa, 0x26, 0xd2, 0x30, 0x38, 0x03, 0xd7, 0x63, 0xda, 0xe2, 0xf2, 0x5b, 0xa4, 0xfd, -+ 0xc7, 0x6e, 0x7f, 0xc4, 0x14, 0xcb, 0x3c, 0xd5, 0xd0, 0xac, 0x39, 0x8b, 0x9e, 0x3b, 0x67, 0x51, -+ 0x1c, 0x79, 0x65, 0xc8, 0x3e, 0x10, 0x86, 0x92, 0xf9, 0x5a, 0xa0, 0x0a, 0x20, 0xa7, 0x60, 0x5e, -+ 0x6b, 0xa1, 0xcd, 0x17, 0x8b, 0x2c, 0xcc, 0x57, 0x30, 0x22, 0x13, 0x0f, 0x72, 0xca, 0xda, 0xf8, -+ 0x75, 0x28, 0x44, 0x3d, 0x40, 0x6a, 0x9b, 0x69, 0xe6, 0x0e, 0xc6, 0xf5, 0x34, 0x0f, 0x69, 0xbc, -+ 0x80, 0xeb, 0x90, 0x95, 0x3b, 0xa5, 0xe6, 0xa8, 0x59, 0x38, 0x18, 0xd7, 0x15, 0x82, 0xaa, 0x1f, -+ 0x7c, 0x1c, 0x9c, 0x5d, 0x51, 0x86, 0x85, 0x09, 0x9c, 0x66, 0xfe, 0x60, 0x5c, 0x97, 0x30, 0x95, -+ 0x7f, 0xc9, 0x15, 0x28, 0x5d, 0x67, 0x5d, 0xb7, 0xb5, 0xa7, 0x0f, 0x2d, 0x1b, 0x76, 0xe2, 0x40, -+ 0x64, 0x78, 0x9c, 0x80, 0x52, 0x74, 0xe2, 0x3d, 0x2f, 0xd4, 0x41, 0x5d, 
0x8c, 0x70, 0x37, 0x42, -+ 0xf2, 0x33, 0x04, 0xda, 0xcf, 0x98, 0x40, 0xae, 0x2f, 0x74, 0x0d, 0x95, 0x8f, 0x9a, 0x70, 0x30, -+ 0xae, 0x6b, 0x0c, 0xd5, 0xbf, 0xf8, 0x22, 0xcc, 0x85, 0xf2, 0x44, 0xc1, 0x6c, 0x32, 0x7c, 0xe4, -+ 0x42, 0x73, 0x51, 0x84, 0xc1, 0xc1, 0xb8, 0x6e, 0x08, 0xa9, 0xf9, 0xc0, 0x6b, 0x89, 0xfe, 0xa2, -+ 0x14, 0x5b, 0x38, 0x18, 0xd7, 0x2d, 0xac, 0xdd, 0x6f, 0xc8, 0x4f, 0x11, 0x14, 0xef, 0xb8, 0xbd, -+ 0x28, 0x84, 0x22, 0x17, 0x21, 0xcb, 0x45, 0x22, 0x9d, 0xdb, 0xac, 0xef, 0xee, 0x5d, 0xf6, 0x03, -+ 0xc9, 0x73, 0x9e, 0x46, 0x70, 0xdc, 0x12, 0x9c, 0xa9, 0x2d, 0x21, 0x3b, 0x73, 0x61, 0xbb, 0xea, -+ 0xe4, 0xd3, 0x4b, 0x19, 0xf2, 0x5b, 0x04, 0x25, 0x25, 0x99, 0x0e, 0x8b, 0x1f, 0x42, 0x4e, 0x09, -+ 0x2e, 0x65, 0xfb, 0x92, 0xe4, 0x3f, 0x33, 0x4b, 0xe2, 0x6b, 0x9e, 0xf8, 0xbb, 0xb0, 0xd0, 0x0e, -+ 0xfc, 0xe1, 0x90, 0xb5, 0xb7, 0x75, 0x89, 0x49, 0x4f, 0x96, 0x98, 0x4d, 0x7b, 0x9d, 0x4e, 0x90, -+ 0x93, 0xbf, 0x20, 0x98, 0xd7, 0xd9, 0xac, 0x6d, 0x19, 0xd9, 0x00, 0x3d, 0x77, 0x71, 0x4f, 0xcf, -+ 0x5a, 0xdc, 0x8f, 0x42, 0xae, 0x1b, 0xf8, 0xa3, 0x61, 0x58, 0xc9, 0xa8, 0xdc, 0x51, 0xd0, 0x6c, -+ 0x45, 0x9f, 0x5c, 0x85, 0x05, 0xa3, 0xca, 0x33, 0x4a, 0x5a, 0x75, 0xb2, 0xa4, 0x6d, 0xb5, 0xd9, -+ 0x80, 0xf7, 0x3a, 0xbd, 0xa8, 0x48, 0x69, 0x7a, 0xf2, 0x29, 0x82, 0xa5, 0x49, 0x12, 0xfc, 0x1d, -+ 0x2b, 0x0f, 0x04, 0xbb, 0x93, 0xcf, 0x66, 0xb7, 0x26, 0x8b, 0x43, 0xf8, 0xde, 0x80, 0x07, 0x7b, -+ 0x26, 0x47, 0xaa, 0xef, 0x40, 0xd1, 0x42, 0x8b, 0xe6, 0x71, 0x9f, 0x99, 0x98, 0x15, 0x9f, 0x71, -+ 0xb2, 0xa6, 0x55, 0x1c, 0x4b, 0xe0, 0x42, 0xfa, 0x3c, 0x12, 0x11, 0x3f, 0x9f, 0xf0, 0x24, 0x3e, -+ 0x0f, 0x4e, 0x27, 0xf0, 0xbd, 0x99, 0xdc, 0x24, 0x77, 0xe0, 0x6f, 0x40, 0x9a, 0xfb, 0x33, 0x39, -+ 0x29, 0xcd, 0x7d, 0xe1, 0x23, 0xad, 0x7c, 0x46, 0xdd, 0xd0, 0x14, 0x44, 0x7e, 0x83, 0x60, 0x51, -+ 0xec, 0x51, 0x16, 0xd8, 0xd8, 0x1d, 0x0d, 0xee, 0xe3, 0x55, 0x58, 0x12, 0x27, 0xdd, 0xeb, 0xe9, -+ 0x0e, 0x70, 0xaf, 0xd7, 0xd6, 0x6a, 0x2e, 0x08, 0xbc, 0x69, 0x0c, 0x5b, 0x6d, 0x7c, 0x0c, 0xe6, -+ 0x46, 0xa1, 0x22, 0x50, 0x3a, 0xe7, 0x04, 0xb8, 0xd5, 0xc6, 0x67, 0xac, 0xe3, 0x84, 0xad, 0xad, -+ 0x6b, 0x92, 0xb4, 0xe1, 0x6d, 0xb7, 0x17, 0x44, 0xc5, 0xe7, 0x14, 0xe4, 0x5a, 0xe2, 0x60, 0x15, -+ 0x27, 0xa2, 0x03, 0x45, 0xc4, 0x52, 0x20, 0xaa, 0x97, 0xc9, 0x37, 0xa1, 0x10, 0xed, 0x9e, 0xda, -+ 0x78, 0xa6, 0x7a, 0x80, 0x5c, 0x84, 0x45, 0x55, 0x54, 0xa7, 0x6f, 0x2e, 0x4d, 0xdb, 0x5c, 0x32, -+ 0x9b, 0x5f, 0x85, 0xac, 0xb2, 0x0a, 0x06, 0xa7, 0xed, 0x72, 0xd7, 0x6c, 0x11, 0xdf, 0xa4, 0x02, -+ 0x47, 0xef, 0x04, 0xee, 0x20, 0xec, 0xb0, 0x40, 0x12, 0x45, 0xb1, 0x4b, 0x5e, 0x81, 0x65, 0x51, -+ 0x48, 0x58, 0x10, 0x6e, 0xf8, 0xa3, 0x01, 0x37, 0x17, 0xfd, 0xb3, 0x50, 0x4e, 0xa2, 0x75, 0xa8, -+ 0x97, 0x21, 0xdb, 0x12, 0x08, 0xc9, 0x7d, 0x9e, 0x2a, 0x80, 0xfc, 0x12, 0x01, 0xbe, 0xc2, 0xb8, -+ 0x64, 0xbd, 0xb5, 0x19, 0x5a, 0x97, 0x3b, 0xcf, 0xe5, 0xad, 0x5d, 0x16, 0x84, 0xe6, 0xa2, 0x63, -+ 0xe0, 0xff, 0xc5, 0xe5, 0x8e, 0x9c, 0x83, 0xe5, 0x84, 0x94, 0x5a, 0xa7, 0x2a, 0xe4, 0x5b, 0x1a, -+ 0xa7, 0x9b, 0x6a, 0x04, 0x93, 0xdf, 0xa5, 0x21, 0xaf, 0x7c, 0xcb, 0x3a, 0xf8, 0x1c, 0x14, 0x3b, -+ 0x22, 0xd6, 0x82, 0x61, 0xd0, 0xd3, 0x26, 0x70, 0x9a, 0x8b, 0x07, 0xe3, 0xba, 0x8d, 0xa6, 0x36, -+ 0x80, 0xdf, 0x98, 0x08, 0xbc, 0x66, 0x79, 0x7f, 0x5c, 0xcf, 0x7d, 0x4f, 0x04, 0xdf, 0xa6, 0x68, -+ 0x6f, 0x32, 0x0c, 0x37, 0xa3, 0x70, 0xbc, 0xa6, 0xb3, 0x4d, 0xde, 0xf4, 0x9a, 0xdf, 0x12, 0xe2, -+ 0x4f, 0xd4, 0xeb, 0x61, 0xe0, 0x7b, 0x8c, 0xef, 0xb2, 0x51, 0xd8, 0x68, 0xf9, 0x9e, 0xe7, 0x0f, -+ 0x1a, 0x72, 0xac, 0x93, 0x4a, 0x8b, 0x1e, 0x2d, 0xb6, 0xeb, 
0x04, 0xbc, 0x03, 0x73, 0x7c, 0x37, -+ 0xf0, 0x47, 0xdd, 0x5d, 0xd9, 0x7e, 0x32, 0xcd, 0x0b, 0xb3, 0xf3, 0x33, 0x1c, 0xa8, 0xf9, 0xc0, -+ 0x27, 0x84, 0xb5, 0x58, 0xeb, 0x7e, 0x38, 0xf2, 0xd4, 0xb0, 0xd4, 0xcc, 0x1e, 0x8c, 0xeb, 0xe8, -+ 0x0d, 0x1a, 0xa1, 0xc9, 0xa7, 0x69, 0xa8, 0xcb, 0x10, 0xbe, 0x2b, 0xef, 0x26, 0x97, 0xfd, 0xe0, -+ 0x06, 0xe3, 0x41, 0xaf, 0x75, 0xd3, 0xf5, 0x98, 0x89, 0x8d, 0x3a, 0x14, 0x3d, 0x89, 0xbc, 0x67, -+ 0x25, 0x07, 0x78, 0x11, 0x1d, 0x7e, 0x0d, 0x40, 0xa6, 0x9d, 0x5a, 0x57, 0x79, 0x52, 0x90, 0x18, -+ 0xb9, 0xbc, 0x91, 0xb0, 0x54, 0x63, 0x46, 0xcd, 0xb4, 0x85, 0xb6, 0x26, 0x2d, 0x34, 0x33, 0x9f, -+ 0xc8, 0x2c, 0x76, 0xac, 0x67, 0x93, 0xb1, 0x4e, 0xfe, 0x8a, 0xa0, 0x76, 0xdd, 0x48, 0xfe, 0x9c, -+ 0xe6, 0x30, 0xfa, 0xa6, 0x5f, 0x90, 0xbe, 0x99, 0xff, 0x4e, 0x5f, 0xf2, 0x47, 0x2b, 0xe5, 0x29, -+ 0xeb, 0x18, 0x3d, 0x36, 0xac, 0x76, 0xf1, 0x22, 0xc4, 0x4c, 0xbf, 0x40, 0xb7, 0x64, 0x26, 0xdc, -+ 0xf2, 0x6e, 0x5c, 0x0e, 0xa4, 0x06, 0xba, 0x1c, 0x9c, 0x04, 0x27, 0x60, 0x1d, 0xd3, 0x7c, 0xf1, -+ 0x64, 0x8d, 0x67, 0x1d, 0x2a, 0xd7, 0xc9, 0xef, 0x11, 0x2c, 0x5d, 0x61, 0x3c, 0x79, 0xad, 0x79, -+ 0x99, 0xf4, 0x7f, 0x1f, 0x8e, 0x58, 0xf2, 0x6b, 0xed, 0xdf, 0x9a, 0xb8, 0xcb, 0xbc, 0x12, 0xeb, -+ 0xbf, 0x35, 0x68, 0xb3, 0x4f, 0xf4, 0x8c, 0x96, 0xbc, 0xc6, 0xdc, 0x86, 0xa2, 0xb5, 0x88, 0x2f, -+ 0x4d, 0x5c, 0x60, 0xa6, 0x35, 0xd5, 0x66, 0x59, 0xeb, 0xa4, 0xa6, 0x34, 0x7d, 0x3d, 0x8d, 0xda, -+ 0xfd, 0x36, 0x60, 0x39, 0x36, 0x4a, 0xb6, 0x76, 0xa5, 0x96, 0xd8, 0x6b, 0xd1, 0x7d, 0x26, 0x82, -+ 0xf1, 0x09, 0x70, 0x02, 0xff, 0xa1, 0xb9, 0x99, 0xce, 0xc7, 0x47, 0x52, 0xff, 0x21, 0x95, 0x4b, -+ 0xe4, 0x22, 0x64, 0xa8, 0xff, 0x10, 0xd7, 0x00, 0x02, 0x77, 0xd0, 0x65, 0x77, 0xa3, 0x81, 0xa5, -+ 0x44, 0x2d, 0xcc, 0x33, 0xfa, 0xeb, 0x06, 0x1c, 0xb1, 0x25, 0x52, 0xee, 0x5e, 0x83, 0x39, 0x81, -+ 0xec, 0x4d, 0x7b, 0xf4, 0x92, 0x84, 0x6a, 0xf6, 0x35, 0x44, 0x22, 0x66, 0x20, 0xc6, 0xe3, 0xe3, -+ 0x50, 0xe0, 0xee, 0x4e, 0x9f, 0xdd, 0x8c, 0x73, 0x3e, 0x46, 0x88, 0x55, 0x31, 0x6b, 0xdd, 0xb5, -+ 0x2e, 0x0a, 0x31, 0x02, 0x9f, 0x86, 0xa5, 0x58, 0xe6, 0xdb, 0x01, 0xeb, 0xf4, 0x3e, 0x91, 0x1e, -+ 0x2e, 0xd1, 0x43, 0x78, 0xbc, 0x0a, 0x8b, 0x31, 0x6e, 0x5b, 0xb6, 0x5d, 0x47, 0x92, 0x4e, 0xa2, -+ 0x85, 0x6d, 0xa4, 0xba, 0xef, 0x3d, 0x18, 0xb9, 0x7d, 0x59, 0xc8, 0x4a, 0xd4, 0xc2, 0x90, 0x3f, -+ 0x20, 0x38, 0xa2, 0x5c, 0x2d, 0xa6, 0xec, 0x97, 0x31, 0xea, 0x7f, 0x85, 0x00, 0xdb, 0x1a, 0xe8, -+ 0xd0, 0xfa, 0x7f, 0xfb, 0xf9, 0x44, 0xf4, 0xf5, 0xa2, 0x1c, 0x21, 0x15, 0x2a, 0x7e, 0x01, 0x21, -+ 0xd1, 0x15, 0x50, 0xbe, 0x3b, 0xaa, 0x19, 0x55, 0x61, 0xcc, 0xed, 0x4f, 0x8c, 0xd6, 0x3b, 0x7b, -+ 0x9c, 0x85, 0x7a, 0xc2, 0x94, 0xa3, 0xb5, 0x44, 0x50, 0xf5, 0x23, 0xce, 0x62, 0x03, 0x2e, 0xa3, -+ 0xc6, 0x89, 0xcf, 0xd2, 0x28, 0x6a, 0x3e, 0xc8, 0x4f, 0xd2, 0x30, 0x7f, 0xd7, 0xef, 0x8f, 0xe2, -+ 0x2e, 0xf1, 0x12, 0xd9, 0x39, 0x39, 0xfa, 0x66, 0xcd, 0xe8, 0x8b, 0xc1, 0x09, 0x39, 0x1b, 0xca, -+ 0xc8, 0xca, 0x50, 0xf9, 0x8d, 0x09, 0x94, 0xb8, 0x1b, 0x74, 0x19, 0x57, 0x53, 0x4b, 0x25, 0x27, -+ 0xef, 0x60, 0x09, 0x1c, 0xf9, 0x01, 0x2c, 0x18, 0x53, 0x68, 0x87, 0xbd, 0x09, 0x73, 0x1f, 0x4b, -+ 0xcc, 0x94, 0x87, 0x24, 0x45, 0xaa, 0x8b, 0x94, 0x21, 0x4b, 0xbe, 0xcf, 0x1a, 0x89, 0xc8, 0x55, -+ 0xc8, 0x29, 0x72, 0x7c, 0xdc, 0xbe, 0xa8, 0xab, 0x17, 0x0f, 0x01, 0xeb, 0x5b, 0x37, 0x81, 0x9c, -+ 0x62, 0xa4, 0xdd, 0x2a, 0x3d, 0xaf, 0x30, 0x54, 0xff, 0x9e, 0x3e, 0x09, 0x85, 0xe8, 0x71, 0x15, -+ 0x17, 0x61, 0xee, 0xf2, 0x2d, 0xfa, 0xfd, 0x4b, 0x74, 0x73, 0x29, 0x85, 0x4b, 0x90, 0x6f, 0x5e, -+ 0xda, 0xb8, 0x26, 0x21, 0xb4, 0xfe, 0x2f, 0xc7, 
0xd4, 0x8d, 0x00, 0x7f, 0x1b, 0xb2, 0xaa, 0x18, -+ 0x1c, 0x8d, 0xe5, 0xb7, 0x9f, 0x48, 0xab, 0xc7, 0x0e, 0xe1, 0xf5, 0xd5, 0x3d, 0xf5, 0x26, 0xc2, -+ 0x37, 0xa1, 0x28, 0x91, 0xfa, 0x19, 0xe6, 0xf8, 0xe4, 0x6b, 0x48, 0x82, 0xd3, 0x6b, 0xcf, 0x58, -+ 0xb5, 0xf8, 0x5d, 0x80, 0xac, 0xb4, 0xb8, 0x2d, 0x8d, 0xfd, 0x8c, 0x66, 0x4b, 0x93, 0x78, 0x98, -+ 0x22, 0x29, 0xfc, 0x0e, 0x38, 0x62, 0x66, 0xc0, 0x56, 0xcb, 0xb0, 0x5e, 0x4f, 0xaa, 0x47, 0x27, -+ 0xd1, 0xd6, 0xb1, 0xef, 0x46, 0x8f, 0x40, 0xc7, 0x26, 0x87, 0x5d, 0xb3, 0xbd, 0x72, 0x78, 0x21, -+ 0x3a, 0xf9, 0x96, 0x7a, 0x0d, 0x31, 0xd3, 0x0a, 0x7e, 0x2d, 0x79, 0xd4, 0xc4, 0x70, 0x53, 0xad, -+ 0x3d, 0x6b, 0x39, 0x62, 0x78, 0x1d, 0x8a, 0xd6, 0xa4, 0x60, 0x9b, 0xf5, 0xf0, 0x98, 0x63, 0x9b, -+ 0x75, 0xca, 0x78, 0x41, 0x52, 0xf8, 0x0a, 0xe4, 0x45, 0xa3, 0x15, 0xf5, 0x06, 0xbf, 0x3a, 0xd9, -+ 0x4f, 0xad, 0x3a, 0x5a, 0x3d, 0x3e, 0x7d, 0x31, 0x62, 0x74, 0x19, 0x16, 0xa3, 0x8e, 0xad, 0x83, -+ 0xf6, 0xd8, 0x64, 0xd4, 0x4f, 0xb1, 0x57, 0x32, 0x73, 0x48, 0x6a, 0xfd, 0x23, 0xc8, 0x9b, 0xe1, -+ 0x18, 0x7f, 0x00, 0x0b, 0xc9, 0xd1, 0x10, 0xff, 0x9f, 0x65, 0x9e, 0xe4, 0xc4, 0x5d, 0x5d, 0xb1, -+ 0x96, 0xa6, 0xcf, 0x93, 0xa9, 0x55, 0xb4, 0xfe, 0x91, 0xf9, 0xaf, 0xce, 0xa6, 0xcb, 0x5d, 0x7c, -+ 0x0b, 0x16, 0xa4, 0xf6, 0xd1, 0xbf, 0x7d, 0x12, 0x51, 0x7a, 0xe8, 0x7f, 0x4c, 0x89, 0x28, 0x3d, -+ 0xfc, 0xbf, 0x26, 0x92, 0x6a, 0x7e, 0xf8, 0xf8, 0x49, 0x2d, 0xf5, 0xf9, 0x93, 0x5a, 0xea, 0x8b, -+ 0x27, 0x35, 0xf4, 0xe3, 0xfd, 0x1a, 0xfa, 0xf5, 0x7e, 0x0d, 0x3d, 0xda, 0xaf, 0xa1, 0xc7, 0xfb, -+ 0x35, 0xf4, 0x8f, 0xfd, 0x1a, 0xfa, 0xe7, 0x7e, 0x2d, 0xf5, 0xc5, 0x7e, 0x0d, 0x7d, 0xf6, 0xb4, -+ 0x96, 0x7a, 0xfc, 0xb4, 0x96, 0xfa, 0xfc, 0x69, 0x2d, 0xf5, 0xe1, 0xeb, 0x5f, 0xf6, 0xe8, 0x65, -+ 0x4e, 0xdc, 0xc9, 0xc9, 0x9f, 0xb7, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x57, 0xa3, 0x10, 0x6c, -+ 0x94, 0x1b, 0x00, 0x00, - } - - func (x Direction) String() string { -@@ -4009,6 +4018,14 @@ func (this *VolumeRequest) Equal(that interface{}) bool { - if this.Step != that1.Step { - return false - } -+ if len(this.TargetLabels) != len(that1.TargetLabels) { -+ return false -+ } -+ for i := range this.TargetLabels { -+ if this.TargetLabels[i] != that1.TargetLabels[i] { -+ return false -+ } -+ } - return true - } - func (this *VolumeResponse) Equal(that interface{}) bool { -@@ -4607,13 +4624,14 @@ func (this *VolumeRequest) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 9) -+ s := make([]string, 0, 10) - s = append(s, ""&logproto.VolumeRequest{"") - s = append(s, ""From: ""+fmt.Sprintf(""%#v"", this.From)+"",\n"") - s = append(s, ""Through: ""+fmt.Sprintf(""%#v"", this.Through)+"",\n"") - s = append(s, ""Matchers: ""+fmt.Sprintf(""%#v"", this.Matchers)+"",\n"") - s = append(s, ""Limit: ""+fmt.Sprintf(""%#v"", this.Limit)+"",\n"") - s = append(s, ""Step: ""+fmt.Sprintf(""%#v"", this.Step)+"",\n"") -+ s = append(s, ""TargetLabels: ""+fmt.Sprintf(""%#v"", this.TargetLabels)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } -@@ -7117,6 +7135,15 @@ func (m *VolumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - _ = i - var l int - _ = l -+ if len(m.TargetLabels) > 0 { -+ for iNdEx := len(m.TargetLabels) - 1; iNdEx >= 0; iNdEx-- { -+ i -= len(m.TargetLabels[iNdEx]) -+ copy(dAtA[i:], m.TargetLabels[iNdEx]) -+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.TargetLabels[iNdEx]))) -+ i-- -+ dAtA[i] = 0x32 -+ } -+ } - if m.Step != 0 { - i = encodeVarintLogproto(dAtA, i, uint64(m.Step)) - i-- -@@ -8047,6 +8074,12 @@ func (m *VolumeRequest) 
Size() (n int) { - if m.Step != 0 { - n += 1 + sovLogproto(uint64(m.Step)) - } -+ if len(m.TargetLabels) > 0 { -+ for _, s := range m.TargetLabels { -+ l = len(s) -+ n += 1 + l + sovLogproto(uint64(l)) -+ } -+ } - return n - } - -@@ -8655,6 +8688,7 @@ func (this *VolumeRequest) String() string { - `Matchers:` + fmt.Sprintf(""%v"", this.Matchers) + `,`, - `Limit:` + fmt.Sprintf(""%v"", this.Limit) + `,`, - `Step:` + fmt.Sprintf(""%v"", this.Step) + `,`, -+ `TargetLabels:` + fmt.Sprintf(""%v"", this.TargetLabels) + `,`, - `}`, - }, """") - return s -@@ -14164,6 +14198,38 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error { - break - } - } -+ case 6: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field TargetLabels"", wireType) -+ } -+ var stringLen uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowLogproto -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ stringLen |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ intStringLen := int(stringLen) -+ if intStringLen < 0 { -+ return ErrInvalidLengthLogproto -+ } -+ postIndex := iNdEx + intStringLen -+ if postIndex < 0 { -+ return ErrInvalidLengthLogproto -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.TargetLabels = append(m.TargetLabels, string(dAtA[iNdEx:postIndex])) -+ iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogproto(dAtA[iNdEx:]) -diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto -index 614444d873507..fc701cba8ee2c 100644 ---- a/pkg/logproto/logproto.proto -+++ b/pkg/logproto/logproto.proto -@@ -393,6 +393,7 @@ message VolumeRequest { - string matchers = 3; - int32 limit = 4; - int64 step = 5; -+ repeated string targetLabels = 6; - } - - message VolumeResponse { -diff --git a/pkg/querier/http.go b/pkg/querier/http.go -index de1b79928dedf..eae7f7eb3dd26 100644 ---- a/pkg/querier/http.go -+++ b/pkg/querier/http.go -@@ -445,11 +445,12 @@ func (q *QuerierAPI) SeriesVolumeRangeHandler(w http.ResponseWriter, r *http.Req - } - - req := &logproto.VolumeRequest{ -- From: model.TimeFromUnixNano(rawReq.Start.UnixNano()), -- Through: model.TimeFromUnixNano(rawReq.End.UnixNano()), -- Matchers: rawReq.Query, -- Step: rawReq.Step.Milliseconds(), -- Limit: int32(rawReq.Limit), -+ From: model.TimeFromUnixNano(rawReq.Start.UnixNano()), -+ Through: model.TimeFromUnixNano(rawReq.End.UnixNano()), -+ Matchers: rawReq.Query, -+ Step: rawReq.Step.Milliseconds(), -+ Limit: int32(rawReq.Limit), -+ TargetLabels: rawReq.TargetLabels, - } - - q.seriesVolumeHandler(r.Context(), r, req, w) -@@ -466,11 +467,12 @@ func (q *QuerierAPI) SeriesVolumeInstantHandler(w http.ResponseWriter, r *http.R - } - - req := &logproto.VolumeRequest{ -- From: model.TimeFromUnixNano(rawReq.Start.UnixNano()), -- Through: model.TimeFromUnixNano(rawReq.End.UnixNano()), -- Matchers: rawReq.Query, -- Step: 0, -- Limit: int32(rawReq.Limit), -+ From: model.TimeFromUnixNano(rawReq.Start.UnixNano()), -+ Through: model.TimeFromUnixNano(rawReq.End.UnixNano()), -+ Matchers: rawReq.Query, -+ Step: 0, -+ Limit: int32(rawReq.Limit), -+ TargetLabels: rawReq.TargetLabels, - } - - q.seriesVolumeHandler(r.Context(), r, req, w) -diff --git a/pkg/querier/http_test.go b/pkg/querier/http_test.go -index d4151ff3a890b..64745d4611696 100644 ---- a/pkg/querier/http_test.go -+++ b/pkg/querier/http_test.go -@@ -524,7 +524,7 @@ func TestResponseFormat(t *testing.T) { - logproto.Stream{ - Entries: []logproto.Entry{ - { -- 
Timestamp: time.Unix(0, 123456789012345), -+ Timestamp: time.Unix(0, 123456789012345).UTC(), - Line: ""super line"", - }, - }, -@@ -558,7 +558,7 @@ func TestResponseFormat(t *testing.T) { - logproto.Stream{ - Entries: []logproto.Entry{ - { -- Timestamp: time.Unix(0, 123456789012345), -+ Timestamp: time.Unix(0, 123456789012345).UTC(), - Line: ""super line"", - }, - }, -diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go -index f448d13a6dfb8..0c00a5b35e9e5 100644 ---- a/pkg/querier/ingester_querier.go -+++ b/pkg/querier/ingester_querier.go -@@ -319,7 +319,7 @@ func (q *IngesterQuerier) Stats(ctx context.Context, _ string, from, through mod - return &merged, nil - } - --func (q *IngesterQuerier) SeriesVolume(ctx context.Context, _ string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (q *IngesterQuerier) SeriesVolume(ctx context.Context, _ string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - matcherString := ""{}"" - if len(matchers) > 0 { - matcherString = syntax.MatchersString(matchers) -@@ -327,10 +327,11 @@ func (q *IngesterQuerier) SeriesVolume(ctx context.Context, _ string, from, thro - - resps, err := q.forAllIngesters(ctx, func(ctx context.Context, querierClient logproto.QuerierClient) (interface{}, error) { - return querierClient.GetSeriesVolume(ctx, &logproto.VolumeRequest{ -- From: from, -- Through: through, -- Matchers: matcherString, -- Limit: limit, -+ From: from, -+ Through: through, -+ Matchers: matcherString, -+ Limit: limit, -+ TargetLabels: targetLabels, - }) - }) - -diff --git a/pkg/querier/ingester_querier_test.go b/pkg/querier/ingester_querier_test.go -index 6426bfee8487d..d75c652132920 100644 ---- a/pkg/querier/ingester_querier_test.go -+++ b/pkg/querier/ingester_querier_test.go -@@ -365,7 +365,7 @@ func TestIngesterQuerier_SeriesVolume(t *testing.T) { - ) - require.NoError(t, err) - -- volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10) -+ volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10, nil) - require.NoError(t, err) - - require.Equal(t, []logproto.Volume{ -@@ -385,7 +385,7 @@ func TestIngesterQuerier_SeriesVolume(t *testing.T) { - ) - require.NoError(t, err) - -- volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10) -+ volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10, nil) - require.NoError(t, err) - - require.Equal(t, []logproto.Volume(nil), volumes.Volumes) -diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go -index cbbaf5294ee88..e716ad7e3d51c 100644 ---- a/pkg/querier/querier.go -+++ b/pkg/querier/querier.go -@@ -800,6 +800,7 @@ func (q *SingleTenantQuerier) SeriesVolume(ctx context.Context, req *logproto.Vo - ""through"", req.Through.Time(), - ""matchers"", syntax.MatchersString(matchers), - ""limit"", req.Limit, -+ ""targetLabels"", req.TargetLabels, - ) - - ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.From.Time(), req.Through.Time()) -@@ -826,6 +827,7 @@ func (q *SingleTenantQuerier) SeriesVolume(ctx context.Context, req *logproto.Vo - model.TimeFromUnix(ingesterQueryInterval.start.Unix()), - model.TimeFromUnix(ingesterQueryInterval.end.Unix()), - req.Limit, -+ req.TargetLabels, - matchers..., - ) - if err != nil { -@@ -842,6 +844,7 @@ func (q *SingleTenantQuerier) SeriesVolume(ctx context.Context, req *logproto.Vo - 
model.TimeFromUnix(storeQueryInterval.start.Unix()), - model.TimeFromUnix(storeQueryInterval.end.Unix()), - req.Limit, -+ req.TargetLabels, - matchers..., - ) - if err != nil { -diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go -index c12f4e3af9b31..a1b17f8334a28 100644 ---- a/pkg/querier/querier_mock_test.go -+++ b/pkg/querier/querier_mock_test.go -@@ -367,8 +367,8 @@ func (s *storeMock) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*la - return nil, nil - } - --func (s *storeMock) SeriesVolume(ctx context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -- args := s.Called(ctx, userID, from, through, matchers) -+func (s *storeMock) SeriesVolume(ctx context.Context, userID string, from, through model.Time, _ int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+ args := s.Called(ctx, userID, from, through, targetLabels, matchers) - return args.Get(0).(*logproto.VolumeResponse), args.Error(1) - } - -diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go -index 2bb842e8df786..c3fb27d9bed7f 100644 ---- a/pkg/querier/querier_test.go -+++ b/pkg/querier/querier_test.go -@@ -977,7 +977,7 @@ func TestQuerier_SeriesVolumes(t *testing.T) { - - ingesterClient := newQuerierClientMock() - store := newStoreMock() -- store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil) -+ store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil) - - conf := mockQuerierConfig() - conf.QueryIngestersWithin = time.Minute * 30 -@@ -1050,7 +1050,7 @@ func TestQuerier_SeriesVolumes(t *testing.T) { - ingesterClient.On(""GetSeriesVolume"", mock.Anything, mock.Anything, mock.Anything).Return(ret, nil) - - store := newStoreMock() -- store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil) -+ store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil) - - conf := mockQuerierConfig() - conf.QueryIngestersWithin = time.Minute * 30 -diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go -index a04514b45afb9..4ba2e36d961ac 100644 ---- a/pkg/querier/queryrange/codec.go -+++ b/pkg/querier/queryrange/codec.go -@@ -282,11 +282,12 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer - } - from, through := util.RoundToMilliseconds(req.Start, req.End) - return &logproto.VolumeRequest{ -- From: from, -- Through: through, -- Matchers: req.Query, -- Limit: int32(req.Limit), -- Step: 0, -+ From: from, -+ Through: through, -+ Matchers: req.Query, -+ Limit: int32(req.Limit), -+ Step: 0, -+ TargetLabels: req.TargetLabels, - }, err - case SeriesVolumeRangeOp: - req, err := loghttp.ParseSeriesVolumeRangeQuery(r) -@@ -295,11 +296,12 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer - } - from, through := util.RoundToMilliseconds(req.Start, req.End) - return &logproto.VolumeRequest{ -- From: from, -- Through: through, -- Matchers: req.Query, -- Limit: int32(req.Limit), -- Step: req.Step.Milliseconds(), -+ From: from, -+ Through: through, -+ Matchers: req.Query, -+ Limit: int32(req.Limit), -+ Step: req.Step.Milliseconds(), -+ TargetLabels: req.TargetLabels, - }, err - default: - return nil, 
httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf(""unknown request path: %s"", r.URL.Path)) -@@ -440,6 +442,10 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht - ""limit"": []string{fmt.Sprintf(""%d"", request.Limit)}, - } - -+ if len(request.TargetLabels) > 0 { -+ params[""targetLabels""] = []string{strings.Join(request.TargetLabels, "","")} -+ } -+ - var u *url.URL - if request.Step != 0 { - params[""step""] = []string{fmt.Sprintf(""%f"", float64(request.Step)/float64(1e3))} -diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go -index 18499b75427ca..cf013dfeac01b 100644 ---- a/pkg/querier/queryrange/codec_test.go -+++ b/pkg/querier/queryrange/codec_test.go -@@ -36,7 +36,7 @@ var ( - end = start.Add(1 * time.Hour) - ) - --func Test_codec_DecodeRequest(t *testing.T) { -+func Test_codec_EncodeDecodeRequest(t *testing.T) { - tests := []struct { - name string - reqBuilder func() (*http.Request, error) -@@ -108,18 +108,20 @@ func Test_codec_DecodeRequest(t *testing.T) { - }, false}, - {""series_volume"", func() (*http.Request, error) { - return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{ -- From: model.TimeFromUnixNano(start.UnixNano()), -- Through: model.TimeFromUnixNano(end.UnixNano()), -- Matchers: `{job=""foo""}`, -- Limit: 3, -- Step: 0, -+ From: model.TimeFromUnixNano(start.UnixNano()), -+ Through: model.TimeFromUnixNano(end.UnixNano()), -+ Matchers: `{job=""foo""}`, -+ Limit: 3, -+ Step: 0, -+ TargetLabels: []string{""job""}, - }) - }, &logproto.VolumeRequest{ -- From: model.TimeFromUnixNano(start.UnixNano()), -- Through: model.TimeFromUnixNano(end.UnixNano()), -- Matchers: `{job=""foo""}`, -- Limit: 3, -- Step: 0, -+ From: model.TimeFromUnixNano(start.UnixNano()), -+ Through: model.TimeFromUnixNano(end.UnixNano()), -+ Matchers: `{job=""foo""}`, -+ Limit: 3, -+ Step: 0, -+ TargetLabels: []string{""job""}, - }, false}, - {""series_volume_default_limit"", func() (*http.Request, error) { - return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{ -@@ -136,18 +138,20 @@ func Test_codec_DecodeRequest(t *testing.T) { - }, false}, - {""series_volume_range"", func() (*http.Request, error) { - return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{ -- From: model.TimeFromUnixNano(start.UnixNano()), -- Through: model.TimeFromUnixNano(end.UnixNano()), -- Matchers: `{job=""foo""}`, -- Limit: 3, -- Step: 30 * 1e3, -+ From: model.TimeFromUnixNano(start.UnixNano()), -+ Through: model.TimeFromUnixNano(end.UnixNano()), -+ Matchers: `{job=""foo""}`, -+ Limit: 3, -+ Step: 30 * 1e3, -+ TargetLabels: []string{""fizz"", ""buzz""}, - }) - }, &logproto.VolumeRequest{ -- From: model.TimeFromUnixNano(start.UnixNano()), -- Through: model.TimeFromUnixNano(end.UnixNano()), -- Matchers: `{job=""foo""}`, -- Limit: 3, -- Step: 30 * 1e3, // step is expected in ms -+ From: model.TimeFromUnixNano(start.UnixNano()), -+ Through: model.TimeFromUnixNano(end.UnixNano()), -+ Matchers: `{job=""foo""}`, -+ Limit: 3, -+ Step: 30 * 1e3, // step is expected in ms -+ TargetLabels: []string{""fizz"", ""buzz""}, - }, false}, - {""series_volume_range_default_limit"", func() (*http.Request, error) { - return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{ -@@ -175,7 +179,7 @@ func Test_codec_DecodeRequest(t *testing.T) { - t.Errorf(""codec.DecodeRequest() error = %v, wantErr %v"", err, tt.wantErr) - return - } -- require.Equal(t, got, tt.want) -+ require.Equal(t, 
tt.want, got) - }) - } - } -@@ -698,11 +702,12 @@ func Test_codec_index_stats_EncodeRequest(t *testing.T) { - func Test_codec_seriesVolume_EncodeRequest(t *testing.T) { - from, through := util.RoundToMilliseconds(start, end) - toEncode := &logproto.VolumeRequest{ -- From: from, -- Through: through, -- Matchers: `{job=""foo""}`, -- Limit: 20, -- Step: 30 * 1e6, -+ From: from, -+ Through: through, -+ Matchers: `{job=""foo""}`, -+ Limit: 20, -+ Step: 30 * 1e6, -+ TargetLabels: []string{""foo"", ""bar""}, - } - got, err := DefaultCodec.EncodeRequest(context.Background(), toEncode) - require.Nil(t, err) -@@ -711,6 +716,7 @@ func Test_codec_seriesVolume_EncodeRequest(t *testing.T) { - require.Equal(t, `{job=""foo""}`, got.URL.Query().Get(""query"")) - require.Equal(t, ""20"", got.URL.Query().Get(""limit"")) - require.Equal(t, fmt.Sprintf(""%f"", float64(toEncode.Step/1e3)), got.URL.Query().Get(""step"")) -+ require.Equal(t, `foo,bar`, got.URL.Query().Get(""targetLabels"")) - } - - func Test_codec_EncodeResponse(t *testing.T) { -diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go -index 152633ef47262..09fd34933329a 100644 ---- a/pkg/querier/queryrange/queryrange.pb.go -+++ b/pkg/querier/queryrange/queryrange.pb.go -@@ -17,6 +17,7 @@ import ( - queryrangebase ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"" - _ ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"" - github_com_grafana_loki_pkg_querier_queryrange_queryrangebase_definitions ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"" -+ github_com_prometheus_common_model ""github.com/prometheus/common/model"" - io ""io"" - math ""math"" - math_bits ""math/bits"" -@@ -739,9 +740,12 @@ func (m *IndexStatsResponse) XXX_DiscardUnknown() { - var xxx_messageInfo_IndexStatsResponse proto.InternalMessageInfo - - type VolumeRequest struct { -- Match []string `protobuf:""bytes,1,rep,name=match,proto3"" json:""match,omitempty""` -- StartTs time.Time `protobuf:""bytes,2,opt,name=startTs,proto3,stdtime"" json:""startTs""` -- EndTs time.Time `protobuf:""bytes,3,opt,name=endTs,proto3,stdtime"" json:""endTs""` -+ From github_com_prometheus_common_model.Time `protobuf:""varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time"" json:""from""` -+ Through github_com_prometheus_common_model.Time `protobuf:""varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time"" json:""through""` -+ Matchers string `protobuf:""bytes,3,opt,name=matchers,proto3"" json:""matchers,omitempty""` -+ Limit int32 `protobuf:""varint,4,opt,name=limit,proto3"" json:""limit,omitempty""` -+ Step int64 `protobuf:""varint,5,opt,name=step,proto3"" json:""step,omitempty""` -+ TargetLabels []string `protobuf:""bytes,6,rep,name=targetLabels,proto3"" json:""targetLabels,omitempty""` - } - - func (m *VolumeRequest) Reset() { *m = VolumeRequest{} } -@@ -776,25 +780,32 @@ func (m *VolumeRequest) XXX_DiscardUnknown() { - - var xxx_messageInfo_VolumeRequest proto.InternalMessageInfo - --func (m *VolumeRequest) GetMatch() []string { -+func (m *VolumeRequest) GetMatchers() string { - if m != nil { -- return m.Match -+ return m.Matchers - } -- return nil -+ return """" - } - --func (m *VolumeRequest) GetStartTs() time.Time { -+func (m *VolumeRequest) GetLimit() int32 { - if m != nil { -- return m.StartTs -+ return m.Limit - } -- return time.Time{} -+ return 0 - } - --func (m *VolumeRequest) GetEndTs() time.Time { -+func (m *VolumeRequest) 
GetStep() int64 { - if m != nil { -- return m.EndTs -+ return m.Step - } -- return time.Time{} -+ return 0 -+} -+ -+func (m *VolumeRequest) GetTargetLabels() []string { -+ if m != nil { -+ return m.TargetLabels -+ } -+ return nil - } - - type VolumeResponse struct { -@@ -992,80 +1003,85 @@ func init() { - } - - var fileDescriptor_51b9d53b40d11902 = []byte{ -- // 1161 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4d, 0x6f, 0x23, 0x35, -- 0x18, 0x8e, 0xf3, 0xd5, 0xc6, 0xa5, 0x05, 0xdc, 0xb2, 0x3b, 0x2a, 0xab, 0x99, 0x28, 0x12, 0x6c, -- 0x90, 0x60, 0x22, 0xda, 0xb2, 0xcb, 0x97, 0x10, 0x3b, 0x14, 0xd4, 0x4a, 0x2b, 0x04, 0xb3, 0x15, -- 0x77, 0xa7, 0x71, 0x93, 0xa1, 0xf3, 0xd5, 0xb1, 0x53, 0xd1, 0x1b, 0x3f, 0x00, 0xa4, 0xfd, 0x0b, -- 0x5c, 0x10, 0x12, 0x88, 0x1f, 0x80, 0xc4, 0xbd, 0xc7, 0x1e, 0x57, 0x95, 0x18, 0x68, 0x7a, 0x81, -- 0x72, 0xe9, 0x4f, 0x40, 0xfe, 0x98, 0xc4, 0x93, 0x7e, 0x6c, 0xd3, 0xbd, 0x14, 0x69, 0x2f, 0x89, -- 0xed, 0x79, 0x1e, 0xdb, 0xef, 0xf3, 0x3e, 0xaf, 0x3d, 0x03, 0xef, 0xc6, 0xdb, 0xdd, 0xd6, 0x4e, -- 0x9f, 0x24, 0x1e, 0x49, 0xc4, 0xff, 0x5e, 0x82, 0xc3, 0x2e, 0xd1, 0x9a, 0x76, 0x9c, 0x44, 0x2c, -- 0x42, 0x70, 0x34, 0xb2, 0xb8, 0xd0, 0x8d, 0xba, 0x91, 0x18, 0x6e, 0xf1, 0x96, 0x44, 0x2c, 0x5a, -- 0xdd, 0x28, 0xea, 0xfa, 0xa4, 0x25, 0x7a, 0xed, 0xfe, 0x56, 0x8b, 0x79, 0x01, 0xa1, 0x0c, 0x07, -- 0xb1, 0x02, 0xbc, 0xca, 0xd7, 0xf2, 0xa3, 0xae, 0x64, 0x66, 0x0d, 0xf5, 0xb0, 0xae, 0x1e, 0xee, -- 0xf8, 0x41, 0xd4, 0x21, 0x7e, 0x8b, 0x32, 0xcc, 0xa8, 0xfc, 0x55, 0x88, 0x79, 0x8e, 0x88, 0xfb, -- 0xb4, 0x27, 0x7e, 0xd4, 0xe0, 0x27, 0x4f, 0xdd, 0x7f, 0x1b, 0x53, 0xd2, 0xea, 0x90, 0x2d, 0x2f, -- 0xf4, 0x98, 0x17, 0x85, 0x54, 0x6f, 0xab, 0x49, 0xee, 0x5d, 0x6d, 0x92, 0x71, 0x4d, 0x1a, 0x07, -- 0x45, 0x38, 0xf3, 0x30, 0xda, 0xf6, 0x5c, 0xb2, 0xd3, 0x27, 0x94, 0xa1, 0x05, 0x58, 0x11, 0x18, -- 0x03, 0xd4, 0x41, 0xb3, 0xe6, 0xca, 0x0e, 0x1f, 0xf5, 0xbd, 0xc0, 0x63, 0x46, 0xb1, 0x0e, 0x9a, -- 0xb3, 0xae, 0xec, 0x20, 0x04, 0xcb, 0x94, 0x91, 0xd8, 0x28, 0xd5, 0x41, 0xb3, 0xe4, 0x8a, 0x36, -- 0x5a, 0x84, 0xd3, 0x5e, 0xc8, 0x48, 0xb2, 0x8b, 0x7d, 0xa3, 0x26, 0xc6, 0x87, 0x7d, 0xf4, 0x11, -- 0x9c, 0xa2, 0x0c, 0x27, 0x6c, 0x83, 0x1a, 0xe5, 0x3a, 0x68, 0xce, 0x2c, 0x2d, 0xda, 0x52, 0x6f, -- 0x3b, 0xd3, 0xdb, 0xde, 0xc8, 0xf4, 0x76, 0xa6, 0xf7, 0x53, 0xab, 0xf0, 0xf8, 0x4f, 0x0b, 0xb8, -- 0x19, 0x09, 0xbd, 0x0f, 0x2b, 0x24, 0xec, 0x6c, 0x50, 0xa3, 0x32, 0x01, 0x5b, 0x52, 0xd0, 0xdb, -- 0xb0, 0xd6, 0xf1, 0x12, 0xb2, 0xc9, 0x35, 0x33, 0xaa, 0x75, 0xd0, 0x9c, 0x5b, 0x9a, 0xb7, 0x87, -- 0xf9, 0x5b, 0xcd, 0x1e, 0xb9, 0x23, 0x14, 0x0f, 0x2f, 0xc6, 0xac, 0x67, 0x4c, 0x09, 0x25, 0x44, -- 0x1b, 0x35, 0x60, 0x95, 0xf6, 0x70, 0xd2, 0xa1, 0xc6, 0x74, 0xbd, 0xd4, 0xac, 0x39, 0xf0, 0x24, -- 0xb5, 0xd4, 0x88, 0xab, 0xfe, 0x1b, 0xff, 0x00, 0x88, 0xb8, 0xa4, 0xeb, 0x21, 0x65, 0x38, 0x64, -- 0xd7, 0x51, 0xf6, 0x43, 0x58, 0xe5, 0xce, 0xdb, 0xa0, 0x42, 0xdb, 0xab, 0x86, 0xaa, 0x38, 0xf9, -- 0x58, 0xcb, 0x13, 0xc5, 0x5a, 0x39, 0x37, 0xd6, 0xea, 0x85, 0xb1, 0xfe, 0x5c, 0x86, 0x2f, 0x48, -- 0xfb, 0xd0, 0x38, 0x0a, 0x29, 0xe1, 0xa4, 0x47, 0x0c, 0xb3, 0x3e, 0x95, 0x61, 0x2a, 0x92, 0x18, -- 0x71, 0xd5, 0x13, 0xf4, 0x31, 0x2c, 0xaf, 0x62, 0x86, 0x45, 0xc8, 0x33, 0x4b, 0x0b, 0xb6, 0x66, -- 0x4a, 0x3e, 0x17, 0x7f, 0xe6, 0xdc, 0xe2, 0x51, 0x9d, 0xa4, 0xd6, 0x5c, 0x07, 0x33, 0xfc, 0x66, -- 0x14, 0x78, 0x8c, 0x04, 0x31, 0xdb, 0x73, 0x05, 0x13, 0xbd, 0x03, 0x6b, 0x9f, 0x26, 0x49, 0x94, -- 0x6c, 0xec, 0xc5, 0x44, 0x48, 0x54, 0x73, 0x6e, 0x9f, 0xa4, 0xd6, 0x3c, 
0xc9, 0x06, 0x35, 0xc6, -- 0x08, 0x89, 0xde, 0x80, 0x15, 0xd1, 0x11, 0xa2, 0xd4, 0x9c, 0xf9, 0x93, 0xd4, 0x7a, 0x51, 0x50, -- 0x34, 0xb8, 0x44, 0xe4, 0x35, 0xac, 0x5c, 0x49, 0xc3, 0x61, 0x2a, 0xab, 0x7a, 0x2a, 0x0d, 0x38, -- 0xb5, 0x4b, 0x12, 0xca, 0xa7, 0x99, 0x12, 0xe3, 0x59, 0x17, 0x3d, 0x80, 0x90, 0x0b, 0xe3, 0x51, -- 0xe6, 0x6d, 0x72, 0x3f, 0x71, 0x31, 0x66, 0x6d, 0x79, 0x5c, 0xb8, 0x84, 0xf6, 0x7d, 0xe6, 0x20, -- 0xa5, 0x82, 0x06, 0x74, 0xb5, 0x36, 0xfa, 0x05, 0xc0, 0xa9, 0x35, 0x82, 0x3b, 0x24, 0xa1, 0x46, -- 0xad, 0x5e, 0x6a, 0xce, 0x2c, 0xbd, 0x66, 0xeb, 0x67, 0xc3, 0x17, 0x49, 0x14, 0x10, 0xd6, 0x23, -- 0x7d, 0x9a, 0x25, 0x48, 0xa2, 0x9d, 0xed, 0xc3, 0xd4, 0x6a, 0x77, 0x3d, 0xd6, 0xeb, 0xb7, 0xed, -- 0xcd, 0x28, 0x68, 0x75, 0x13, 0xbc, 0x85, 0x43, 0xdc, 0xf2, 0xa3, 0x6d, 0xaf, 0x35, 0xf1, 0x79, -- 0x74, 0xe1, 0x3a, 0x27, 0xa9, 0x05, 0xde, 0x72, 0xb3, 0x2d, 0x36, 0xfe, 0x00, 0xf0, 0x65, 0x9e, -- 0xe1, 0x47, 0x7c, 0x6e, 0xaa, 0x15, 0x46, 0x80, 0xd9, 0x66, 0xcf, 0x00, 0xdc, 0x66, 0xae, 0xec, -- 0xe8, 0x87, 0x45, 0xf1, 0x99, 0x0e, 0x8b, 0xd2, 0xe4, 0x87, 0x45, 0x56, 0x0d, 0xe5, 0x73, 0xab, -- 0xa1, 0x72, 0x61, 0x35, 0x7c, 0x57, 0x92, 0x95, 0x9f, 0xc5, 0x37, 0x41, 0x4d, 0x7c, 0x36, 0xac, -- 0x89, 0x92, 0xd8, 0xed, 0xd0, 0x6a, 0x72, 0xae, 0xf5, 0x0e, 0x09, 0x99, 0xb7, 0xe5, 0x91, 0xe4, -- 0x29, 0x95, 0xa1, 0xd9, 0xad, 0x94, 0xb7, 0x9b, 0xee, 0x95, 0xf2, 0x8d, 0xf7, 0xca, 0x58, 0x75, -- 0x54, 0xae, 0x51, 0x1d, 0x8d, 0xdf, 0x01, 0x7c, 0x85, 0xa7, 0xe3, 0x21, 0x6e, 0x13, 0xff, 0x73, -- 0x1c, 0x8c, 0x2c, 0xa7, 0x99, 0x0b, 0x3c, 0x93, 0xb9, 0x8a, 0xd7, 0x37, 0x57, 0x49, 0x33, 0xd7, -- 0xf0, 0x6e, 0x28, 0x6b, 0x77, 0x43, 0xe3, 0xb4, 0x08, 0x6f, 0x8d, 0xef, 0x7f, 0x02, 0x4b, 0xbd, -- 0xae, 0x59, 0xaa, 0xe6, 0xa0, 0xe7, 0x96, 0xb9, 0x82, 0x65, 0x7e, 0x04, 0x70, 0x3a, 0xbb, 0x83, -- 0x90, 0x0d, 0xa1, 0xa4, 0x89, 0x6b, 0x46, 0x0a, 0x3d, 0xc7, 0xc9, 0xc9, 0x70, 0xd4, 0xd5, 0x10, -- 0xe8, 0x6b, 0x58, 0x95, 0x3d, 0x55, 0xc5, 0xb7, 0xb5, 0x2a, 0x66, 0x09, 0xc1, 0xc1, 0x83, 0x0e, -- 0x8e, 0x19, 0x49, 0x9c, 0xf7, 0xf8, 0x2e, 0x0e, 0x53, 0xeb, 0xee, 0x65, 0x12, 0x89, 0x37, 0x44, -- 0xc9, 0xe3, 0xc9, 0x95, 0x6b, 0xba, 0x6a, 0x85, 0xc6, 0xf7, 0x00, 0xbe, 0xc4, 0x37, 0xca, 0xa5, -- 0x19, 0xba, 0x62, 0x15, 0x4e, 0x27, 0xaa, 0xad, 0x7c, 0xdd, 0xb0, 0xf3, 0xb2, 0x9e, 0x23, 0xa5, -- 0x53, 0xde, 0x4f, 0x2d, 0xe0, 0x0e, 0x99, 0x68, 0x39, 0x27, 0x63, 0xf1, 0x3c, 0x19, 0x39, 0xa5, -- 0x90, 0x13, 0xee, 0xb7, 0x22, 0x44, 0xeb, 0x61, 0x87, 0x7c, 0xc3, 0xcd, 0x37, 0xf2, 0x69, 0xff, -- 0xcc, 0x8e, 0xee, 0x8c, 0x44, 0x39, 0x8b, 0x77, 0x3e, 0x38, 0x4c, 0xad, 0xfb, 0x97, 0xa9, 0x72, -- 0x09, 0x59, 0x0b, 0x41, 0x37, 0x6e, 0xf1, 0xe6, 0xdf, 0x8b, 0x3f, 0x00, 0x38, 0xfb, 0x55, 0xe4, -- 0xf7, 0x03, 0x72, 0x63, 0xef, 0xc4, 0xc6, 0xaf, 0x45, 0x38, 0x97, 0xed, 0x51, 0xa9, 0x1c, 0x9c, -- 0x49, 0xae, 0x31, 0x4a, 0x6e, 0x1e, 0xeb, 0xdc, 0x3f, 0x4c, 0xad, 0xe5, 0x2b, 0x25, 0x36, 0x4f, -- 0xfc, 0xff, 0x26, 0xf5, 0xdf, 0x22, 0x9c, 0xfd, 0x92, 0xcf, 0x32, 0xd4, 0xeb, 0x5d, 0x58, 0xa5, -- 0xe2, 0x36, 0x57, 0x6a, 0x99, 0xe3, 0x6f, 0xbe, 0xf9, 0xf7, 0x86, 0xb5, 0x82, 0xab, 0xf0, 0xfc, -- 0x7b, 0xc0, 0xe7, 0x97, 0x40, 0x96, 0xf7, 0xc6, 0x38, 0xf3, 0xec, 0x15, 0xc1, 0xd9, 0x92, 0x83, -- 0xee, 0xc1, 0x8a, 0xa8, 0x5e, 0x95, 0xf6, 0xdc, 0xb2, 0x67, 0xcb, 0x68, 0xad, 0xe0, 0x4a, 0x38, -- 0x5a, 0x82, 0xe5, 0x38, 0x89, 0x02, 0xf5, 0xb1, 0x76, 0x67, 0x7c, 0x4d, 0xfd, 0xe8, 0x59, 0x2b, -- 0xb8, 0x02, 0x8b, 0x56, 0xb8, 0x45, 0xf9, 0x99, 0x95, 0x1d, 0xc0, 0xc6, 0x38, 0x4d, 0xa3, 0x64, -- 0x50, 0xb4, 0x02, 0xab, 0xbb, 0x22, 0xed, 0xe2, 0xdd, 0x99, 
0x3b, 0x53, 0x23, 0xe5, 0x0d, 0xc1, -- 0xe3, 0x92, 0x58, 0x07, 0x8e, 0xfc, 0xe7, 0xac, 0x1c, 0x1c, 0x99, 0x85, 0x27, 0x47, 0x66, 0xe1, -- 0xf4, 0xc8, 0x04, 0xdf, 0x0e, 0x4c, 0xf0, 0xd3, 0xc0, 0x04, 0xfb, 0x03, 0x13, 0x1c, 0x0c, 0x4c, -- 0xf0, 0xd7, 0xc0, 0x04, 0x7f, 0x0f, 0xcc, 0xc2, 0xe9, 0xc0, 0x04, 0x8f, 0x8f, 0xcd, 0xc2, 0xc1, -- 0xb1, 0x59, 0x78, 0x72, 0x6c, 0x16, 0xda, 0x55, 0x61, 0xb9, 0xe5, 0xff, 0x02, 0x00, 0x00, 0xff, -- 0xff, 0xf8, 0x4c, 0x44, 0xf0, 0x43, 0x10, 0x00, 0x00, -+ // 1245 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, -+ 0x17, 0xf7, 0xac, 0x7f, 0xc5, 0x93, 0x26, 0xdf, 0x2f, 0x93, 0xd2, 0xae, 0x42, 0xb5, 0x6b, 0x59, -+ 0x82, 0x1a, 0x09, 0x76, 0x45, 0x5a, 0x5a, 0x7e, 0x09, 0xd1, 0x6d, 0x40, 0x89, 0x54, 0x21, 0xd8, -+ 0x46, 0xdc, 0xc7, 0xf1, 0xc4, 0x5e, 0xe2, 0xdd, 0x75, 0x66, 0xc6, 0x11, 0xb9, 0x71, 0x45, 0x02, -+ 0xa9, 0x7f, 0x05, 0x42, 0x02, 0xf1, 0x07, 0x20, 0x71, 0xcf, 0x31, 0xc7, 0x2a, 0x12, 0x86, 0x38, -+ 0x17, 0x08, 0x97, 0xfc, 0x09, 0x68, 0x66, 0x76, 0xd7, 0xb3, 0x8e, 0x93, 0xc6, 0xe9, 0xa5, 0x48, -+ 0x5c, 0xec, 0x99, 0xb7, 0xef, 0x33, 0x3b, 0xef, 0xf3, 0x3e, 0xef, 0xcd, 0x2c, 0xbc, 0xdd, 0xdf, -+ 0xee, 0xb8, 0x3b, 0x03, 0x42, 0x03, 0x42, 0xe5, 0xff, 0x1e, 0xc5, 0x51, 0x87, 0x68, 0x43, 0xa7, -+ 0x4f, 0x63, 0x1e, 0x23, 0x38, 0xb6, 0x2c, 0x5f, 0xef, 0xc4, 0x9d, 0x58, 0x9a, 0x5d, 0x31, 0x52, -+ 0x1e, 0xcb, 0x76, 0x27, 0x8e, 0x3b, 0x3d, 0xe2, 0xca, 0x59, 0x6b, 0xb0, 0xe5, 0xf2, 0x20, 0x24, -+ 0x8c, 0xe3, 0xb0, 0x9f, 0x38, 0xbc, 0x22, 0xde, 0xd5, 0x8b, 0x3b, 0x0a, 0x99, 0x0e, 0x92, 0x87, -+ 0xf5, 0xe4, 0xe1, 0x4e, 0x2f, 0x8c, 0xdb, 0xa4, 0xe7, 0x32, 0x8e, 0x39, 0x53, 0xbf, 0x89, 0xc7, -+ 0x92, 0xf0, 0xe8, 0x0f, 0x58, 0x57, 0xfe, 0x24, 0xc6, 0x87, 0xcf, 0xdc, 0x7f, 0x0b, 0x33, 0xe2, -+ 0xb6, 0xc9, 0x56, 0x10, 0x05, 0x3c, 0x88, 0x23, 0xa6, 0x8f, 0x93, 0x45, 0xee, 0x5d, 0x6e, 0x91, -+ 0x49, 0x4e, 0x1a, 0x07, 0x06, 0x9c, 0x7f, 0x14, 0x6f, 0x07, 0x3e, 0xd9, 0x19, 0x10, 0xc6, 0xd1, -+ 0x75, 0x58, 0x96, 0x3e, 0x26, 0xa8, 0x83, 0x66, 0xcd, 0x57, 0x13, 0x61, 0xed, 0x05, 0x61, 0xc0, -+ 0x4d, 0xa3, 0x0e, 0x9a, 0x0b, 0xbe, 0x9a, 0x20, 0x04, 0x4b, 0x8c, 0x93, 0xbe, 0x59, 0xac, 0x83, -+ 0x66, 0xd1, 0x97, 0x63, 0xb4, 0x0c, 0xe7, 0x82, 0x88, 0x13, 0xba, 0x8b, 0x7b, 0x66, 0x4d, 0xda, -+ 0xb3, 0x39, 0xfa, 0x10, 0x56, 0x19, 0xc7, 0x94, 0x6f, 0x30, 0xb3, 0x54, 0x07, 0xcd, 0xf9, 0x95, -+ 0x65, 0x47, 0xf1, 0xed, 0xa4, 0x7c, 0x3b, 0x1b, 0x29, 0xdf, 0xde, 0xdc, 0xfe, 0xd0, 0x2e, 0x3c, -+ 0xf9, 0xdd, 0x06, 0x7e, 0x0a, 0x42, 0xef, 0xc1, 0x32, 0x89, 0xda, 0x1b, 0xcc, 0x2c, 0xcf, 0x80, -+ 0x56, 0x10, 0xf4, 0x16, 0xac, 0xb5, 0x03, 0x4a, 0x36, 0x05, 0x67, 0x66, 0xa5, 0x0e, 0x9a, 0x8b, -+ 0x2b, 0x4b, 0x4e, 0x96, 0xbf, 0xd5, 0xf4, 0x91, 0x3f, 0xf6, 0x12, 0xe1, 0xf5, 0x31, 0xef, 0x9a, -+ 0x55, 0xc9, 0x84, 0x1c, 0xa3, 0x06, 0xac, 0xb0, 0x2e, 0xa6, 0x6d, 0x66, 0xce, 0xd5, 0x8b, 0xcd, -+ 0x9a, 0x07, 0x4f, 0x86, 0x76, 0x62, 0xf1, 0x93, 0xff, 0xc6, 0x5f, 0x00, 0x22, 0x41, 0xe9, 0x7a, -+ 0xc4, 0x38, 0x8e, 0xf8, 0x55, 0x98, 0xfd, 0x00, 0x56, 0x84, 0xf2, 0x36, 0x98, 0xe4, 0xf6, 0xb2, -+ 0xa1, 0x26, 0x98, 0x7c, 0xac, 0xa5, 0x99, 0x62, 0x2d, 0x4f, 0x8d, 0xb5, 0x72, 0x6e, 0xac, 0x3f, -+ 0x96, 0xe0, 0x35, 0x25, 0x1f, 0xd6, 0x8f, 0x23, 0x46, 0x04, 0xe8, 0x31, 0xc7, 0x7c, 0xc0, 0x54, -+ 0x98, 0x09, 0x48, 0x5a, 0xfc, 0xe4, 0x09, 0xfa, 0x08, 0x96, 0x56, 0x31, 0xc7, 0x32, 0xe4, 0xf9, -+ 0x95, 0xeb, 0x8e, 0x26, 0x4a, 0xb1, 0x96, 0x78, 0xe6, 0xdd, 0x10, 0x51, 0x9d, 0x0c, 0xed, 0xc5, -+ 0x36, 0xe6, 0xf8, 0x8d, 0x38, 0x0c, 0x38, 
0x09, 0xfb, 0x7c, 0xcf, 0x97, 0x48, 0xf4, 0x36, 0xac, -+ 0x7d, 0x4c, 0x69, 0x4c, 0x37, 0xf6, 0xfa, 0x44, 0x52, 0x54, 0xf3, 0x6e, 0x9e, 0x0c, 0xed, 0x25, -+ 0x92, 0x1a, 0x35, 0xc4, 0xd8, 0x13, 0xbd, 0x0e, 0xcb, 0x72, 0x22, 0x49, 0xa9, 0x79, 0x4b, 0x27, -+ 0x43, 0xfb, 0x7f, 0x12, 0xa2, 0xb9, 0x2b, 0x8f, 0x3c, 0x87, 0xe5, 0x4b, 0x71, 0x98, 0xa5, 0xb2, -+ 0xa2, 0xa7, 0xd2, 0x84, 0xd5, 0x5d, 0x42, 0x99, 0x58, 0xa6, 0x2a, 0xed, 0xe9, 0x14, 0x3d, 0x80, -+ 0x50, 0x10, 0x13, 0x30, 0x1e, 0x6c, 0x0a, 0x3d, 0x09, 0x32, 0x16, 0x1c, 0xd5, 0x2e, 0x7c, 0xc2, -+ 0x06, 0x3d, 0xee, 0xa1, 0x84, 0x05, 0xcd, 0xd1, 0xd7, 0xc6, 0xe8, 0x27, 0x00, 0xab, 0x6b, 0x04, -+ 0xb7, 0x09, 0x65, 0x66, 0xad, 0x5e, 0x6c, 0xce, 0xaf, 0xbc, 0xea, 0xe8, 0xbd, 0xe1, 0x33, 0x1a, -+ 0x87, 0x84, 0x77, 0xc9, 0x80, 0xa5, 0x09, 0x52, 0xde, 0xde, 0xf6, 0xe1, 0xd0, 0x6e, 0x75, 0x02, -+ 0xde, 0x1d, 0xb4, 0x9c, 0xcd, 0x38, 0x74, 0x3b, 0x14, 0x6f, 0xe1, 0x08, 0xbb, 0xbd, 0x78, 0x3b, -+ 0x70, 0x67, 0xee, 0x47, 0xe7, 0xbe, 0xe7, 0x64, 0x68, 0x83, 0x37, 0xfd, 0x74, 0x8b, 0x8d, 0xdf, -+ 0x00, 0x7c, 0x49, 0x64, 0xf8, 0xb1, 0x58, 0x9b, 0x69, 0x85, 0x11, 0x62, 0xbe, 0xd9, 0x35, 0x81, -+ 0x90, 0x99, 0xaf, 0x26, 0x7a, 0xb3, 0x30, 0x9e, 0xab, 0x59, 0x14, 0x67, 0x6f, 0x16, 0x69, 0x35, -+ 0x94, 0xa6, 0x56, 0x43, 0xf9, 0xdc, 0x6a, 0xf8, 0xb6, 0xa8, 0x2a, 0x3f, 0x8d, 0x6f, 0x86, 0x9a, -+ 0xf8, 0x24, 0xab, 0x89, 0xa2, 0xdc, 0x6d, 0x26, 0x35, 0xb5, 0xd6, 0x7a, 0x9b, 0x44, 0x3c, 0xd8, -+ 0x0a, 0x08, 0x7d, 0x46, 0x65, 0x68, 0x72, 0x2b, 0xe6, 0xe5, 0xa6, 0x6b, 0xa5, 0xf4, 0xc2, 0x6b, -+ 0x65, 0xa2, 0x3a, 0xca, 0x57, 0xa8, 0x8e, 0xc6, 0xaf, 0x00, 0xbe, 0x2c, 0xd2, 0xf1, 0x08, 0xb7, -+ 0x48, 0xef, 0x53, 0x1c, 0x8e, 0x25, 0xa7, 0x89, 0x0b, 0x3c, 0x97, 0xb8, 0x8c, 0xab, 0x8b, 0xab, -+ 0xa8, 0x89, 0x2b, 0x3b, 0x1b, 0x4a, 0xda, 0xd9, 0xd0, 0x38, 0x35, 0xe0, 0x8d, 0xc9, 0xfd, 0xcf, -+ 0x20, 0xa9, 0xd7, 0x34, 0x49, 0xd5, 0x3c, 0xf4, 0x9f, 0x64, 0x2e, 0x21, 0x99, 0xef, 0x01, 0x9c, -+ 0x4b, 0xcf, 0x20, 0xe4, 0x40, 0xa8, 0x60, 0xf2, 0x98, 0x51, 0x44, 0x2f, 0x0a, 0x30, 0xcd, 0xac, -+ 0xbe, 0xe6, 0x81, 0xbe, 0x84, 0x15, 0x35, 0x4b, 0xaa, 0xf8, 0xa6, 0x56, 0xc5, 0x9c, 0x12, 0x1c, -+ 0x3e, 0x68, 0xe3, 0x3e, 0x27, 0xd4, 0x7b, 0x57, 0xec, 0xe2, 0x70, 0x68, 0xdf, 0xbe, 0x88, 0x22, -+ 0x79, 0x43, 0x54, 0x38, 0x91, 0x5c, 0xf5, 0x4e, 0x3f, 0x79, 0x43, 0xe3, 0x3b, 0x00, 0xff, 0x2f, -+ 0x36, 0x2a, 0xa8, 0xc9, 0x54, 0xb1, 0x0a, 0xe7, 0x68, 0x32, 0x4e, 0x74, 0xdd, 0x70, 0xf2, 0xb4, -+ 0x4e, 0xa1, 0xd2, 0x2b, 0xed, 0x0f, 0x6d, 0xe0, 0x67, 0x48, 0x74, 0x27, 0x47, 0xa3, 0x31, 0x8d, -+ 0x46, 0x01, 0x29, 0xe4, 0x88, 0xfb, 0xc5, 0x80, 0x68, 0x3d, 0x6a, 0x93, 0xaf, 0x84, 0xf8, 0xc6, -+ 0x3a, 0x1d, 0x9c, 0xd9, 0xd1, 0xad, 0x31, 0x29, 0x67, 0xfd, 0xbd, 0xf7, 0x0f, 0x87, 0xf6, 0xfd, -+ 0x8b, 0x58, 0xb9, 0x00, 0xac, 0x85, 0xa0, 0x0b, 0xd7, 0x78, 0xf1, 0xcf, 0xc5, 0x6f, 0x0c, 0xb8, -+ 0xf0, 0x45, 0xdc, 0x1b, 0x84, 0x24, 0x6d, 0x50, 0x0f, 0x61, 0x69, 0x8b, 0xc6, 0xa1, 0xe4, 0xac, -+ 0xe8, 0xb9, 0x53, 0xf4, 0xd2, 0xcf, 0x96, 0x76, 0x37, 0xe3, 0x30, 0x8c, 0x23, 0x57, 0x7e, 0x74, -+ 0xc8, 0xce, 0xe3, 0x4b, 0x30, 0x5a, 0x87, 0x55, 0xde, 0xa5, 0xf1, 0xa0, 0xd3, 0x95, 0x59, 0xbc, -+ 0xc2, 0x3a, 0x29, 0x5e, 0x5c, 0xeb, 0xe5, 0xb1, 0x2c, 0x08, 0x55, 0x8d, 0x2b, 0x9b, 0x8f, 0xef, -+ 0x3d, 0xa2, 0x79, 0x95, 0x27, 0x3f, 0x0e, 0xca, 0xda, 0xc7, 0x41, 0x03, 0x5e, 0xe3, 0x98, 0x76, -+ 0x08, 0x97, 0x1d, 0x2d, 0xb9, 0x57, 0xfa, 0x39, 0x5b, 0xe3, 0x67, 0x03, 0x2e, 0xa6, 0x5c, 0x24, -+ 0xd9, 0x0c, 0xcf, 0x88, 0xc8, 0x1c, 0x8b, 0x28, 0xef, 0xeb, 0xdd, 0x3f, 0x1c, 0xda, 0x77, 0x2e, -+ 0x25, 0xa0, 0x3c, 0xf0, 0xdf, 
0x2b, 0x9e, 0xbf, 0x0d, 0xb8, 0xf0, 0xb9, 0x58, 0x25, 0xe3, 0xeb, -+ 0x1d, 0x58, 0x61, 0xf2, 0xd6, 0x90, 0xb0, 0x65, 0x4d, 0xde, 0xb0, 0xf3, 0xf7, 0x93, 0xb5, 0x82, -+ 0x9f, 0xf8, 0x8b, 0xef, 0x8e, 0x9e, 0x4a, 0x8d, 0x71, 0xa6, 0x7d, 0x38, 0xd3, 0x8f, 0x22, 0x81, -+ 0x56, 0x18, 0x74, 0x0f, 0x96, 0x65, 0x97, 0x48, 0xae, 0x5c, 0xb9, 0xd7, 0x9e, 0x2d, 0xd7, 0xb5, -+ 0x82, 0xaf, 0xdc, 0xd1, 0x0a, 0x2c, 0x09, 0x15, 0x26, 0x1f, 0x85, 0xb7, 0x26, 0xdf, 0xa9, 0xb7, -+ 0xb8, 0xb5, 0x82, 0x2f, 0x7d, 0xd1, 0x5d, 0x71, 0x82, 0x8b, 0xde, 0x98, 0x36, 0x7a, 0x73, 0x12, -+ 0xa6, 0x41, 0x52, 0x57, 0x74, 0x17, 0x56, 0x76, 0x65, 0xda, 0xe5, 0x1d, 0x5d, 0x1c, 0xdc, 0x1a, -+ 0x28, 0x2f, 0x08, 0x11, 0x97, 0xf2, 0xf5, 0xe0, 0x58, 0x7f, 0xde, 0xdd, 0x83, 0x23, 0xab, 0xf0, -+ 0xf4, 0xc8, 0x2a, 0x9c, 0x1e, 0x59, 0xe0, 0xeb, 0x91, 0x05, 0x7e, 0x18, 0x59, 0x60, 0x7f, 0x64, -+ 0x81, 0x83, 0x91, 0x05, 0xfe, 0x18, 0x59, 0xe0, 0xcf, 0x91, 0x55, 0x38, 0x1d, 0x59, 0xe0, 0xc9, -+ 0xb1, 0x55, 0x38, 0x38, 0xb6, 0x0a, 0x4f, 0x8f, 0xad, 0x42, 0xab, 0x22, 0x25, 0x77, 0xe7, 0x9f, -+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x60, 0x11, 0x37, 0x7d, 0xab, 0x10, 0x00, 0x00, - } - - func (this *LokiRequest) Equal(that interface{}) bool { -@@ -1503,20 +1519,29 @@ func (this *VolumeRequest) Equal(that interface{}) bool { - } else if this == nil { - return false - } -- if len(this.Match) != len(that1.Match) { -+ if !this.From.Equal(that1.From) { - return false - } -- for i := range this.Match { -- if this.Match[i] != that1.Match[i] { -- return false -- } -+ if !this.Through.Equal(that1.Through) { -+ return false - } -- if !this.StartTs.Equal(that1.StartTs) { -+ if this.Matchers != that1.Matchers { - return false - } -- if !this.EndTs.Equal(that1.EndTs) { -+ if this.Limit != that1.Limit { -+ return false -+ } -+ if this.Step != that1.Step { -+ return false -+ } -+ if len(this.TargetLabels) != len(that1.TargetLabels) { - return false - } -+ for i := range this.TargetLabels { -+ if this.TargetLabels[i] != that1.TargetLabels[i] { -+ return false -+ } -+ } - return true - } - func (this *VolumeResponse) Equal(that interface{}) bool { -@@ -1880,11 +1905,14 @@ func (this *VolumeRequest) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 7) -+ s := make([]string, 0, 10) - s = append(s, ""&queryrange.VolumeRequest{"") -- s = append(s, ""Match: ""+fmt.Sprintf(""%#v"", this.Match)+"",\n"") -- s = append(s, ""StartTs: ""+fmt.Sprintf(""%#v"", this.StartTs)+"",\n"") -- s = append(s, ""EndTs: ""+fmt.Sprintf(""%#v"", this.EndTs)+"",\n"") -+ s = append(s, ""From: ""+fmt.Sprintf(""%#v"", this.From)+"",\n"") -+ s = append(s, ""Through: ""+fmt.Sprintf(""%#v"", this.Through)+"",\n"") -+ s = append(s, ""Matchers: ""+fmt.Sprintf(""%#v"", this.Matchers)+"",\n"") -+ s = append(s, ""Limit: ""+fmt.Sprintf(""%#v"", this.Limit)+"",\n"") -+ s = append(s, ""Step: ""+fmt.Sprintf(""%#v"", this.Step)+"",\n"") -+ s = append(s, ""TargetLabels: ""+fmt.Sprintf(""%#v"", this.TargetLabels)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } -@@ -2622,31 +2650,42 @@ func (m *VolumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - _ = i - var l int - _ = l -- n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) -- if err15 != nil { -- return 0, err15 -- } -- i -= n15 -- i = encodeVarintQueryrange(dAtA, i, uint64(n15)) -- i-- -- dAtA[i] = 0x1a -- n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) -- if err16 != nil { -- return 0, err16 -- } -- i -= n16 -- i = encodeVarintQueryrange(dAtA, i, uint64(n16)) -- i-- -- dAtA[i] = 0x12 -- if len(m.Match) > 0 { -- for iNdEx := len(m.Match) - 1; iNdEx >= 0; iNdEx-- { -- i -= len(m.Match[iNdEx]) -- copy(dAtA[i:], m.Match[iNdEx]) -- i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Match[iNdEx]))) -+ if len(m.TargetLabels) > 0 { -+ for iNdEx := len(m.TargetLabels) - 1; iNdEx >= 0; iNdEx-- { -+ i -= len(m.TargetLabels[iNdEx]) -+ copy(dAtA[i:], m.TargetLabels[iNdEx]) -+ i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TargetLabels[iNdEx]))) - i-- -- dAtA[i] = 0xa -+ dAtA[i] = 0x32 - } - } -+ if m.Step != 0 { -+ i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) -+ i-- -+ dAtA[i] = 0x28 -+ } -+ if m.Limit != 0 { -+ i = encodeVarintQueryrange(dAtA, i, uint64(m.Limit)) -+ i-- -+ dAtA[i] = 0x20 -+ } -+ if len(m.Matchers) > 0 { -+ i -= len(m.Matchers) -+ copy(dAtA[i:], m.Matchers) -+ i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Matchers))) -+ i-- -+ dAtA[i] = 0x1a -+ } -+ if m.Through != 0 { -+ i = encodeVarintQueryrange(dAtA, i, uint64(m.Through)) -+ i-- -+ dAtA[i] = 0x10 -+ } -+ if m.From != 0 { -+ i = encodeVarintQueryrange(dAtA, i, uint64(m.From)) -+ i-- -+ dAtA[i] = 0x8 -+ } - return len(dAtA) - i, nil - } - -@@ -3141,16 +3180,28 @@ func (m *VolumeRequest) Size() (n int) { - } - var l int - _ = l -- if len(m.Match) > 0 { -- for _, s := range m.Match { -+ if m.From != 0 { -+ n += 1 + sovQueryrange(uint64(m.From)) -+ } -+ if m.Through != 0 { -+ n += 1 + sovQueryrange(uint64(m.Through)) -+ } -+ l = len(m.Matchers) -+ if l > 0 { -+ n += 1 + l + sovQueryrange(uint64(l)) -+ } -+ if m.Limit != 0 { -+ n += 1 + sovQueryrange(uint64(m.Limit)) -+ } -+ if m.Step != 0 { -+ n += 1 + sovQueryrange(uint64(m.Step)) -+ } -+ if len(m.TargetLabels) > 0 { -+ for _, s := range m.TargetLabels { - l = len(s) - n += 1 + l + sovQueryrange(uint64(l)) - } - } -- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs) -- n += 1 + l + sovQueryrange(uint64(l)) -- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs) -- n += 1 + l + sovQueryrange(uint64(l)) - return n - } - -@@ -3413,9 +3464,12 @@ func (this *VolumeRequest) String() string { - return ""nil"" - } - s := strings.Join([]string{`&VolumeRequest{`, -- `Match:` + fmt.Sprintf(""%v"", this.Match) + `,`, -- `StartTs:` + strings.Replace(strings.Replace(fmt.Sprintf(""%v"", this.StartTs), ""Timestamp"", ""types.Timestamp"", 1), `&`, ``, 1) + `,`, -- `EndTs:` + strings.Replace(strings.Replace(fmt.Sprintf(""%v"", this.EndTs), ""Timestamp"", ""types.Timestamp"", 1), `&`, ``, 1) + `,`, -+ `From:` + fmt.Sprintf(""%v"", this.From) + `,`, -+ `Through:` + fmt.Sprintf(""%v"", this.Through) + `,`, -+ `Matchers:` + fmt.Sprintf(""%v"", this.Matchers) + `,`, -+ `Limit:` + fmt.Sprintf(""%v"", this.Limit) + `,`, -+ `Step:` + fmt.Sprintf(""%v"", this.Step) + `,`, -+ `TargetLabels:` + fmt.Sprintf(""%v"", this.TargetLabels) + `,`, - `}`, - }, """") - return s -@@ -5526,8 +5580,46 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error { - } - switch fieldNum { - case 1: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field From"", wireType) -+ } -+ m.From = 0 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowQueryrange -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ m.From |= github_com_prometheus_common_model.Time(b&0x7F) << shift -+ if b < 0x80 { -+ break 
-+ } -+ } -+ case 2: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Through"", wireType) -+ } -+ m.Through = 0 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowQueryrange -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ m.Through |= github_com_prometheus_common_model.Time(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ case 3: - if wireType != 2 { -- return fmt.Errorf(""proto: wrong wireType = %d for field Match"", wireType) -+ return fmt.Errorf(""proto: wrong wireType = %d for field Matchers"", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { -@@ -5555,13 +5647,13 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error { - if postIndex > l { - return io.ErrUnexpectedEOF - } -- m.Match = append(m.Match, string(dAtA[iNdEx:postIndex])) -+ m.Matchers = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex -- case 2: -- if wireType != 2 { -- return fmt.Errorf(""proto: wrong wireType = %d for field StartTs"", wireType) -+ case 4: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Limit"", wireType) - } -- var msglen int -+ m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange -@@ -5571,30 +5663,35 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error { - } - b := dAtA[iNdEx] - iNdEx++ -- msglen |= int(b&0x7F) << shift -+ m.Limit |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } -- if msglen < 0 { -- return ErrInvalidLengthQueryrange -- } -- postIndex := iNdEx + msglen -- if postIndex < 0 { -- return ErrInvalidLengthQueryrange -- } -- if postIndex > l { -- return io.ErrUnexpectedEOF -+ case 5: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Step"", wireType) - } -- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTs, dAtA[iNdEx:postIndex]); err != nil { -- return err -+ m.Step = 0 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowQueryrange -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ m.Step |= int64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } - } -- iNdEx = postIndex -- case 3: -+ case 6: - if wireType != 2 { -- return fmt.Errorf(""proto: wrong wireType = %d for field EndTs"", wireType) -+ return fmt.Errorf(""proto: wrong wireType = %d for field TargetLabels"", wireType) - } -- var msglen int -+ var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange -@@ -5604,24 +5701,23 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error { - } - b := dAtA[iNdEx] - iNdEx++ -- msglen |= int(b&0x7F) << shift -+ stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } -- if msglen < 0 { -+ intStringLen := int(stringLen) -+ if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } -- postIndex := iNdEx + msglen -+ postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } -- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EndTs, dAtA[iNdEx:postIndex]); err != nil { -- return err -- } -+ m.TargetLabels = append(m.TargetLabels, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex -diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto -index c3174d7fa3085..fdfccd1d6fc0a 100644 ---- 
a/pkg/querier/queryrange/queryrange.proto -+++ b/pkg/querier/queryrange/queryrange.proto -@@ -146,15 +146,18 @@ message IndexStatsResponse { - } - - message VolumeRequest { -- repeated string match = 1; -- google.protobuf.Timestamp startTs = 2 [ -- (gogoproto.stdtime) = true, -+ int64 from = 1 [ -+ (gogoproto.customtype) = ""github.com/prometheus/common/model.Time"", - (gogoproto.nullable) = false - ]; -- google.protobuf.Timestamp endTs = 3 [ -- (gogoproto.stdtime) = true, -+ int64 through = 2 [ -+ (gogoproto.customtype) = ""github.com/prometheus/common/model.Time"", - (gogoproto.nullable) = false - ]; -+ string matchers = 3; -+ int32 limit = 4; -+ int64 step = 5; -+ repeated string targetLabels = 6; - } - - message VolumeResponse { -diff --git a/pkg/querier/queryrange/series_volume.go b/pkg/querier/queryrange/series_volume.go -index f5bd563250b39..b5904685adf9c 100644 ---- a/pkg/querier/queryrange/series_volume.go -+++ b/pkg/querier/queryrange/series_volume.go -@@ -64,11 +64,12 @@ func NewSeriesVolumeMiddleware() queryrangebase.Middleware { - } - - reqs[bucket] = &logproto.VolumeRequest{ -- From: model.TimeFromUnix(start.Unix()), -- Through: model.TimeFromUnix(end.Unix()), -- Matchers: volReq.Matchers, -- Limit: volReq.Limit, -- Step: volReq.Step, -+ From: model.TimeFromUnix(start.Unix()), -+ Through: model.TimeFromUnix(end.Unix()), -+ Matchers: volReq.Matchers, -+ Limit: volReq.Limit, -+ Step: volReq.Step, -+ TargetLabels: volReq.TargetLabels, - } - }) - -diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go -index d1a1bd4a1da87..904b72f1c1049 100644 ---- a/pkg/querier/queryrange/split_by_interval.go -+++ b/pkg/querier/queryrange/split_by_interval.go -@@ -295,10 +295,11 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran - endTS := model.Time(r.GetEnd()).Time() - util.ForInterval(interval, startTS, endTS, true, func(start, end time.Time) { - reqs = append(reqs, &logproto.VolumeRequest{ -- From: model.TimeFromUnix(start.Unix()), -- Through: model.TimeFromUnix(end.Unix()), -- Matchers: r.GetMatchers(), -- Limit: r.Limit, -+ From: model.TimeFromUnix(start.Unix()), -+ Through: model.TimeFromUnix(end.Unix()), -+ Matchers: r.GetMatchers(), -+ Limit: r.Limit, -+ TargetLabels: r.TargetLabels, - }) - }) - default: -diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go -index b62663285e85e..53d639e24b020 100644 ---- a/pkg/storage/async_store.go -+++ b/pkg/storage/async_store.go -@@ -28,7 +28,7 @@ import ( - type IngesterQuerier interface { - GetChunkIDs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) - Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error) -- SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) -+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) - } - - type AsyncStoreCfg struct { -@@ -165,7 +165,7 @@ func (a *AsyncStore) Stats(ctx context.Context, userID string, from, through mod - return &merged, nil - } - --func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, 
through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - sp, ctx := opentracing.StartSpanFromContext(ctx, ""AsyncStore.SeriesVolume"") - defer sp.Finish() - -@@ -176,7 +176,7 @@ func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, thro - - if a.shouldQueryIngesters(through, model.Now()) { - jobs = append(jobs, func() (*logproto.VolumeResponse, error) { -- vols, err := a.ingesterQuerier.SeriesVolume(ctx, userID, from, through, limit, matchers...) -+ vols, err := a.ingesterQuerier.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...) - level.Debug(logger).Log( - ""msg"", ""queried label volumes"", - ""matchers"", matchersStr, -@@ -186,7 +186,7 @@ func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, thro - }) - } - jobs = append(jobs, func() (*logproto.VolumeResponse, error) { -- vols, err := a.Store.SeriesVolume(ctx, userID, from, through, limit, matchers...) -+ vols, err := a.Store.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...) - level.Debug(logger).Log( - ""msg"", ""queried label volume"", - ""matchers"", matchersStr, -diff --git a/pkg/storage/async_store_test.go b/pkg/storage/async_store_test.go -index 2abb2c1e45abc..b0a48a0bb9dc8 100644 ---- a/pkg/storage/async_store_test.go -+++ b/pkg/storage/async_store_test.go -@@ -39,8 +39,8 @@ func (s *storeMock) GetChunkFetcher(tm model.Time) *fetcher.Fetcher { - return args.Get(0).(*fetcher.Fetcher) - } - --func (s *storeMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -- args := s.Called(userID, from, through, matchers) -+func (s *storeMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+ args := s.Called(userID, from, through, targetLabels, matchers) - - if args.Get(0) == nil { - return nil, args.Error(1) -@@ -63,8 +63,8 @@ func (i *ingesterQuerierMock) GetChunkIDs(ctx context.Context, from, through mod - return args.Get(0).([]string), args.Error(1) - } - --func (i *ingesterQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -- args := i.Called(userID, from, through, matchers) -+func (i *ingesterQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+ args := i.Called(userID, from, through, targetLabels, matchers) - - if args.Get(0) == nil { - return nil, args.Error(1) -@@ -334,7 +334,7 @@ func TestSeriesVolume(t *testing.T) { - } - asyncStore := NewAsyncStore(asyncStoreCfg, store, config.SchemaConfig{}) - -- vol, err := asyncStore.SeriesVolume(context.Background(), ""test"", model.Now().Add(-2*time.Hour), model.Now(), 10, nil...) -+ vol, err := asyncStore.SeriesVolume(context.Background(), ""test"", model.Now().Add(-2*time.Hour), model.Now(), 10, nil, nil...) 
- require.NoError(t, err) - - require.Equal(t, &logproto.VolumeResponse{ -diff --git a/pkg/storage/stores/composite_store.go b/pkg/storage/stores/composite_store.go -index 98d7ba527b675..a0892cd2a416f 100644 ---- a/pkg/storage/stores/composite_store.go -+++ b/pkg/storage/stores/composite_store.go -@@ -195,10 +195,10 @@ func (c compositeStore) Stats(ctx context.Context, userID string, from, through - return &res, err - } - --func (c compositeStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (c compositeStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - volumes := make([]*logproto.VolumeResponse, 0, len(c.stores)) - err := c.forStores(ctx, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { -- volume, err := store.SeriesVolume(innerCtx, userID, from, through, limit, matchers...) -+ volume, err := store.SeriesVolume(innerCtx, userID, from, through, limit, targetLabels, matchers...) - volumes = append(volumes, volume) - return err - }) -diff --git a/pkg/storage/stores/composite_store_entry.go b/pkg/storage/stores/composite_store_entry.go -index a3f56bf83c497..9c88f9c70caab 100644 ---- a/pkg/storage/stores/composite_store_entry.go -+++ b/pkg/storage/stores/composite_store_entry.go -@@ -132,7 +132,7 @@ func (c *storeEntry) Stats(ctx context.Context, userID string, from, through mod - return c.indexReader.Stats(ctx, userID, from, through, matchers...) - } - --func (c *storeEntry) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (c *storeEntry) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - sp, ctx := opentracing.StartSpanFromContext(ctx, ""SeriesStore.Volume"") - defer sp.Finish() - -@@ -152,7 +152,7 @@ func (c *storeEntry) SeriesVolume(ctx context.Context, userID string, from, thro - ""limit"", limit, - ) - -- return c.indexReader.SeriesVolume(ctx, userID, from, through, limit, matchers...) -+ return c.indexReader.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...) 
- } - - func (c *storeEntry) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) { -diff --git a/pkg/storage/stores/composite_store_test.go b/pkg/storage/stores/composite_store_test.go -index 0ae370e047d9e..0f2d61dfec07d 100644 ---- a/pkg/storage/stores/composite_store_test.go -+++ b/pkg/storage/stores/composite_store_test.go -@@ -56,7 +56,7 @@ func (m mockStore) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*lab - return nil, nil - } - --func (m mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (m mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { - return nil, nil - } - -@@ -305,7 +305,7 @@ type mockStoreSeriesVolume struct { - err error - } - --func (m mockStoreSeriesVolume) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (m mockStoreSeriesVolume) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { - return m.value, m.err - } - -diff --git a/pkg/storage/stores/index/index.go b/pkg/storage/stores/index/index.go -index 59aa802b7e9dd..d466d5120ff11 100644 ---- a/pkg/storage/stores/index/index.go -+++ b/pkg/storage/stores/index/index.go -@@ -23,7 +23,7 @@ type BaseReader interface { - LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) - LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) - Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error) -- SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) -+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) - } - - type Reader interface { -@@ -119,11 +119,11 @@ func (m monitoredReaderWriter) Stats(ctx context.Context, userID string, from, t - return sts, nil - } - --func (m monitoredReaderWriter) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (m monitoredReaderWriter) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - var vol *logproto.VolumeResponse - if err := instrument.CollectedRequest(ctx, ""series_volume"", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error { - var err error -- vol, err = m.rw.SeriesVolume(ctx, userID, from, through, limit, matchers...) -+ vol, err = m.rw.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...) 
- return err - }); err != nil { - return nil, err -diff --git a/pkg/storage/stores/series/series_index_gateway_store.go b/pkg/storage/stores/series/series_index_gateway_store.go -index be0a47ad70666..cdd08acf7688b 100644 ---- a/pkg/storage/stores/series/series_index_gateway_store.go -+++ b/pkg/storage/stores/series/series_index_gateway_store.go -@@ -130,12 +130,13 @@ func (c *IndexGatewayClientStore) Stats(ctx context.Context, userID string, from - return resp, nil - } - --func (c *IndexGatewayClientStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (c *IndexGatewayClientStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - resp, err := c.client.GetSeriesVolume(ctx, &logproto.VolumeRequest{ -- From: from, -- Through: through, -- Matchers: (&syntax.MatchersExpr{Mts: matchers}).String(), -- Limit: limit, -+ From: from, -+ Through: through, -+ Matchers: (&syntax.MatchersExpr{Mts: matchers}).String(), -+ Limit: limit, -+ TargetLabels: targetLabels, - }) - if err != nil { - if isUnimplementedCallError(err) && c.fallbackStore != nil { -@@ -143,7 +144,7 @@ func (c *IndexGatewayClientStore) SeriesVolume(ctx context.Context, userID strin - // Note: this is likely a noop anyway since only - // tsdb+ enables this and the prior index returns an - // empty response. -- return c.fallbackStore.SeriesVolume(ctx, userID, from, through, limit, matchers...) -+ return c.fallbackStore.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...) - } - return nil, err - } -diff --git a/pkg/storage/stores/series/series_index_store.go b/pkg/storage/stores/series/series_index_store.go -index a6178d235494f..5e924b5752dc7 100644 ---- a/pkg/storage/stores/series/series_index_store.go -+++ b/pkg/storage/stores/series/series_index_store.go -@@ -717,6 +717,6 @@ func (c *indexReaderWriter) Stats(_ context.Context, _ string, _, _ model.Time, - } - - // old index stores do not implement label volume -- skip --func (c *indexReaderWriter) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (c *indexReaderWriter) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { - return nil, nil - } -diff --git a/pkg/storage/stores/shipper/indexgateway/gateway.go b/pkg/storage/stores/shipper/indexgateway/gateway.go -index 5dd202c840c37..d1b4dfd361f2f 100644 ---- a/pkg/storage/stores/shipper/indexgateway/gateway.go -+++ b/pkg/storage/stores/shipper/indexgateway/gateway.go -@@ -303,7 +303,7 @@ func (g *Gateway) GetSeriesVolume(ctx context.Context, req *logproto.VolumeReque - return nil, err - } - -- return g.indexQuerier.SeriesVolume(ctx, instanceID, req.From, req.Through, req.GetLimit(), matchers...) -+ return g.indexQuerier.SeriesVolume(ctx, instanceID, req.From, req.Through, req.GetLimit(), req.TargetLabels, matchers...) 
- } - - type failingIndexClient struct{} -diff --git a/pkg/storage/stores/shipper/indexgateway/gateway_test.go b/pkg/storage/stores/shipper/indexgateway/gateway_test.go -index e014269d81ab1..b1b25cce388ae 100644 ---- a/pkg/storage/stores/shipper/indexgateway/gateway_test.go -+++ b/pkg/storage/stores/shipper/indexgateway/gateway_test.go -@@ -272,7 +272,7 @@ func newIngesterQuerierMock() *indexQuerierMock { - return &indexQuerierMock{} - } - --func (i *indexQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (i *indexQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, _ []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - args := i.Called(userID, from, through, matchers) - - if args.Get(0) == nil { -diff --git a/pkg/storage/stores/shipper/indexgateway/shufflesharding.go b/pkg/storage/stores/shipper/indexgateway/shufflesharding.go -index 03dfb61b200fb..b563869d3719c 100644 ---- a/pkg/storage/stores/shipper/indexgateway/shufflesharding.go -+++ b/pkg/storage/stores/shipper/indexgateway/shufflesharding.go -@@ -101,7 +101,7 @@ func (s *NoopStrategy) FilterTenants(tenantIDs []string) ([]string, error) { - return tenantIDs, nil - } - --// GetShardingStrategy returns the correct ShardingStrategy implementaion based -+// GetShardingStrategy returns the correct ShardingStrategy implementation based - // on provided configuration. - func GetShardingStrategy(cfg Config, indexGatewayRingManager *RingManager, o Limits) ShardingStrategy { - if cfg.Mode != RingMode || indexGatewayRingManager.Mode == ClientMode { -diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go -index 281e569af5f37..261612fbc5444 100644 ---- a/pkg/storage/stores/tsdb/head_manager.go -+++ b/pkg/storage/stores/tsdb/head_manager.go -@@ -783,12 +783,12 @@ func (t *tenantHeads) Stats(ctx context.Context, userID string, from, through mo - return idx.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...) - } - --func (t *tenantHeads) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error { -+func (t *tenantHeads) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error { - idx, ok := t.tenantIndex(userID, from, through) - if !ok { - return nil - } -- return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...) -+ return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...) 
- } - - // helper only used in building TSDBs -diff --git a/pkg/storage/stores/tsdb/index.go b/pkg/storage/stores/tsdb/index.go -index e72c70c9c02de..5ccccb7ecdaae 100644 ---- a/pkg/storage/stores/tsdb/index.go -+++ b/pkg/storage/stores/tsdb/index.go -@@ -53,7 +53,7 @@ type Index interface { - LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) - LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) - Stats(ctx context.Context, userID string, from, through model.Time, acc IndexStatsAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error -- SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error -+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error - } - - type NoopIndex struct{} -@@ -81,6 +81,6 @@ func (NoopIndex) Stats(_ context.Context, _ string, _, _ model.Time, _ IndexStat - - func (NoopIndex) SetChunkFilterer(_ chunk.RequestChunkFilterer) {} - --func (NoopIndex) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ SeriesVolumeAccumulator, _ *index.ShardAnnotation, _ shouldIncludeChunk, _ ...*labels.Matcher) error { -+func (NoopIndex) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ SeriesVolumeAccumulator, _ *index.ShardAnnotation, _ shouldIncludeChunk, _ []string, _ ...*labels.Matcher) error { - return nil - } -diff --git a/pkg/storage/stores/tsdb/index_client.go b/pkg/storage/stores/tsdb/index_client.go -index 1484c38a846a2..b8a3d80425a82 100644 ---- a/pkg/storage/stores/tsdb/index_client.go -+++ b/pkg/storage/stores/tsdb/index_client.go -@@ -246,7 +246,7 @@ func (c *IndexClient) Stats(ctx context.Context, userID string, from, through mo - return &res, nil - } - --func (c *IndexClient) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (c *IndexClient) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) { - sp, ctx := opentracing.StartSpanFromContext(ctx, ""IndexClient.SeriesVolume"") - defer sp.Finish() - -@@ -266,7 +266,7 @@ func (c *IndexClient) SeriesVolume(ctx context.Context, userID string, from, thr - - acc := seriesvolume.NewAccumulator(limit, c.limits.VolumeMaxSeries(userID)) - for _, interval := range intervals { -- if err := c.idx.SeriesVolume(ctx, userID, interval.Start, interval.End, acc, shard, nil, matchers...); err != nil { -+ if err := c.idx.SeriesVolume(ctx, userID, interval.Start, interval.End, acc, shard, nil, targetLabels, matchers...); err != nil { - return nil, err - } - } -diff --git a/pkg/storage/stores/tsdb/index_client_test.go b/pkg/storage/stores/tsdb/index_client_test.go -index cf3cc2ee15844..d8c5a6d422fa9 100644 ---- a/pkg/storage/stores/tsdb/index_client_test.go -+++ b/pkg/storage/stores/tsdb/index_client_test.go -@@ -279,7 +279,7 @@ func TestIndexClient_SeriesVolume(t *testing.T) { - through := indexStartToday + 1000 - - t.Run(""it returns series volumes from the whole 
index"", func(t *testing.T) { -- vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 10, nil...) -+ vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 10, nil, nil...) - require.NoError(t, err) - - require.Equal(t, &logproto.VolumeResponse{ -@@ -294,7 +294,7 @@ func TestIndexClient_SeriesVolume(t *testing.T) { - }) - - t.Run(""it returns largest series from the index"", func(t *testing.T) { -- vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil...) -+ vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil, nil...) - require.NoError(t, err) - - require.Equal(t, &logproto.VolumeResponse{ -@@ -307,7 +307,7 @@ func TestIndexClient_SeriesVolume(t *testing.T) { - - t.Run(""it returns an error when the number of selected series exceeds the limit"", func(t *testing.T) { - limits.volumeMaxSeries = 0 -- _, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil...) -+ _, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil, nil...) - require.EqualError(t, err, fmt.Sprintf(seriesvolume.ErrVolumeMaxSeriesHit, 0)) - }) - } -diff --git a/pkg/storage/stores/tsdb/index_shipper_querier.go b/pkg/storage/stores/tsdb/index_shipper_querier.go -index 34591b101fdfa..115544e5d8354 100644 ---- a/pkg/storage/stores/tsdb/index_shipper_querier.go -+++ b/pkg/storage/stores/tsdb/index_shipper_querier.go -@@ -125,13 +125,13 @@ func (i *indexShipperQuerier) Stats(ctx context.Context, userID string, from, th - return idx.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...) - } - --func (i *indexShipperQuerier) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error { -+func (i *indexShipperQuerier) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error { - idx, err := i.indices(ctx, from, through, userID) - if err != nil { - return err - } - -- return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...) -+ return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...) - } - - type resultAccumulator struct { -diff --git a/pkg/storage/stores/tsdb/lazy_index.go b/pkg/storage/stores/tsdb/lazy_index.go -index 17b8cf051da1b..5a862a407a26f 100644 ---- a/pkg/storage/stores/tsdb/lazy_index.go -+++ b/pkg/storage/stores/tsdb/lazy_index.go -@@ -73,10 +73,10 @@ func (f LazyIndex) Stats(ctx context.Context, userID string, from, through model - return i.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...) 
- } - --func (f LazyIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error { -+func (f LazyIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error { - i, err := f() - if err != nil { - return err - } -- return i.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...) -+ return i.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...) - } -diff --git a/pkg/storage/stores/tsdb/multi_file_index.go b/pkg/storage/stores/tsdb/multi_file_index.go -index fee2a0783c165..7630a3648af74 100644 ---- a/pkg/storage/stores/tsdb/multi_file_index.go -+++ b/pkg/storage/stores/tsdb/multi_file_index.go -@@ -337,8 +337,8 @@ func (i *MultiIndex) Stats(ctx context.Context, userID string, from, through mod - }) - } - --func (i *MultiIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error { -+func (i *MultiIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error { - return i.forMatchingIndices(ctx, from, through, func(ctx context.Context, idx Index) error { -- return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...) -+ return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...) - }) - } -diff --git a/pkg/storage/stores/tsdb/multitenant.go b/pkg/storage/stores/tsdb/multitenant.go -index 4085000ba4310..7689f778253e2 100644 ---- a/pkg/storage/stores/tsdb/multitenant.go -+++ b/pkg/storage/stores/tsdb/multitenant.go -@@ -93,6 +93,6 @@ func (m *MultiTenantIndex) Stats(ctx context.Context, userID string, from, throu - return m.idx.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, withTenantLabelMatcher(userID, matchers)...) - } - --func (m *MultiTenantIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error { -- return m.idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, withTenantLabelMatcher(userID, matchers)...) -+func (m *MultiTenantIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error { -+ return m.idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, withTenantLabelMatcher(userID, matchers)...) 
- } -diff --git a/pkg/storage/stores/tsdb/single_file_index.go b/pkg/storage/stores/tsdb/single_file_index.go -index f38513c79c72b..62c1a77b6f504 100644 ---- a/pkg/storage/stores/tsdb/single_file_index.go -+++ b/pkg/storage/stores/tsdb/single_file_index.go -@@ -17,6 +17,7 @@ import ( - ""github.com/grafana/loki/pkg/storage/chunk"" - index_shipper ""github.com/grafana/loki/pkg/storage/stores/indexshipper/index"" - ""github.com/grafana/loki/pkg/storage/stores/tsdb/index"" -+ ""github.com/grafana/loki/pkg/util"" - util_log ""github.com/grafana/loki/pkg/util/log"" - ) - -@@ -324,24 +325,18 @@ func (i *TSDBIndex) Stats(ctx context.Context, _ string, from, through model.Tim - // {foo=""a"", fizz=""b""} - // {foo=""b"", fizz=""a""} - // {foo=""b"", fizz=""b""} --func (i *TSDBIndex) SeriesVolume(ctx context.Context, _ string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, _ shouldIncludeChunk, matchers ...*labels.Matcher) error { -+// -+// SeriesVolume optionally accepts a slice of target labels. If provided, volumes are aggregated -+// into those labels only. For example, given the matcher {fizz=~"".+""} and target labels of []string{""foo""}, -+// volumes would be aggregated as follows: -+// -+// {foo=""a""} which would be the sum of {foo=""a"", fizz=""a""} and {foo=""a"", fizz=""b""} -+// {foo=""b""} which would be the sum of {foo=""b"", fizz=""a""} and {foo=""b"", fizz=""b""} -+func (i *TSDBIndex) SeriesVolume(ctx context.Context, _ string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, _ shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error { - sp, ctx := opentracing.StartSpanFromContext(ctx, ""Index.SeriesVolume"") - defer sp.Finish() - -- var matchAll bool -- labelsToMatch := make(map[string]struct{}) -- for _, m := range matchers { -- if m.Name == """" { -- matchAll = true -- continue -- } -- -- if m.Name == TenantLabel { -- continue -- } -- -- labelsToMatch[m.Name] = struct{}{} -- } -+ labelsToMatch, matchers, includeAll := util.PrepareLabelsAndMatchers(targetLabels, matchers, TenantLabel) - - seriesNames := make(map[uint64]string) - seriesLabels := labels.Labels(make([]labels.Label, 0, len(labelsToMatch))) -@@ -371,7 +366,7 @@ func (i *TSDBIndex) SeriesVolume(ctx context.Context, _ string, from, through mo - if stats.Entries > 0 { - seriesLabels = seriesLabels[:0] - for _, l := range ls { -- if _, ok := labelsToMatch[l.Name]; l.Name != TenantLabel && matchAll || ok { -+ if _, ok := labelsToMatch[l.Name]; l.Name != TenantLabel && includeAll || ok { - seriesLabels = append(seriesLabels, l) - } - } -diff --git a/pkg/storage/stores/tsdb/single_file_index_test.go b/pkg/storage/stores/tsdb/single_file_index_test.go -index 08c5c4ac9f3b4..8d258feee0e98 100644 ---- a/pkg/storage/stores/tsdb/single_file_index_test.go -+++ b/pkg/storage/stores/tsdb/single_file_index_test.go -@@ -427,7 +427,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - t.Run(""it matches all the series when the match all matcher is passed"", func(t *testing.T) { - matcher := labels.MustNewMatcher(labels.MatchEqual, """", """") - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher) - require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ - Volumes: []logproto.Volume{ -@@ -444,7 +444,7 @@ func 
TestTSDBIndex_SeriesVolume(t *testing.T) { - labels.MustNewMatcher(labels.MatchRegexp, ""foo"", "".+""), - } - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, withTenantLabelMatcher(""fake"", matcher)...) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, withTenantLabelMatcher(""fake"", matcher)...) - require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ - Volumes: []logproto.Volume{ -@@ -458,7 +458,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - t.Run(""it matches none of the series"", func(t *testing.T) { - matcher := labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""baz"") - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher) - require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ - Volumes: []logproto.Volume{}, -@@ -469,7 +469,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - t.Run(""it only returns results for the labels in the matcher"", func(t *testing.T) { - matcher := labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""bar"") - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher) - require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ - Volumes: []logproto.Volume{ -@@ -485,7 +485,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - labels.MustNewMatcher(labels.MatchRegexp, ""fizz"", "".+""), - } - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matchers...) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matchers...) - require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ - Volumes: []logproto.Volume{ -@@ -502,7 +502,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - labels.MustNewMatcher(labels.MatchRegexp, ""fizz"", "".+""), - } - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matchers...) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matchers...) 
- require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ - Volumes: []logproto.Volume{ -@@ -519,7 +519,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - - matcher := labels.MustNewMatcher(labels.MatchEqual, """", """") - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher) - - require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ -@@ -531,7 +531,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - t.Run(""only gets factor of stream size within time bounds"", func(t *testing.T) { - matcher := labels.MustNewMatcher(labels.MatchEqual, """", """") - acc := seriesvolume.NewAccumulator(10, 10) -- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through.Add(-30*time.Minute), acc, nil, nil, matcher) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through.Add(-30*time.Minute), acc, nil, nil, nil, matcher) - require.NoError(t, err) - require.Equal(t, &logproto.VolumeResponse{ - Volumes: []logproto.Volume{ -@@ -541,6 +541,49 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) { - Limit: 10, - }, acc.Volumes()) - }) -+ -+ t.Run(""when targetLabels provided, it aggregates by those labels only"", func(t *testing.T) { -+ t.Run(""all targetLabels are added to matchers"", func(t *testing.T) { -+ matcher := labels.MustNewMatcher(labels.MatchEqual, """", """") -+ acc := seriesvolume.NewAccumulator(10, 10) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, []string{""fizz""}, matcher) -+ require.NoError(t, err) -+ require.Equal(t, &logproto.VolumeResponse{ -+ Volumes: []logproto.Volume{ -+ {Name: `{fizz=""fizz""}`, Volume: (30 + 40) * 1024}, -+ {Name: `{fizz=""buzz""}`, Volume: (10 + 20) * 1024}, -+ }, -+ Limit: 10, -+ }, acc.Volumes()) -+ }) -+ -+ t.Run(""with a specific equals matcher"", func(t *testing.T) { -+ matcher := labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""bar"") -+ acc := seriesvolume.NewAccumulator(10, 10) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, []string{""fizz""}, matcher) -+ require.NoError(t, err) -+ require.Equal(t, &logproto.VolumeResponse{ -+ Volumes: []logproto.Volume{ -+ {Name: `{fizz=""fizz""}`, Volume: (30 + 40) * 1024}, -+ {Name: `{fizz=""buzz""}`, Volume: (10 + 20) * 1024}, -+ }, -+ Limit: 10, -+ }, acc.Volumes()) -+ }) -+ -+ t.Run(""with a specific regexp matcher"", func(t *testing.T) { -+ matcher := labels.MustNewMatcher(labels.MatchRegexp, ""fizz"", "".+"") -+ acc := seriesvolume.NewAccumulator(10, 10) -+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, []string{""foo""}, matcher) -+ require.NoError(t, err) -+ require.Equal(t, &logproto.VolumeResponse{ -+ Volumes: []logproto.Volume{ -+ {Name: `{foo=""bar""}`, Volume: (100) * 1024}, -+ }, -+ Limit: 10, -+ }, acc.Volumes()) -+ }) -+ }) - } - - type filterAll struct{} -diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go -index 83cf688f9c81a..a7ae4c1c281d9 100644 ---- a/pkg/storage/util_test.go -+++ b/pkg/storage/util_test.go -@@ -259,7 +259,7 @@ func (m *mockChunkStore) Stats(_ context.Context, _ string, _, _ model.Time, _ . 
- return nil, nil - } - --func (m *mockChunkStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { -+func (m *mockChunkStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) { - return nil, nil - } - -diff --git a/pkg/util/series_volume.go b/pkg/util/series_volume.go -new file mode 100644 -index 0000000000000..206dfc18ac902 ---- /dev/null -+++ b/pkg/util/series_volume.go -@@ -0,0 +1,73 @@ -+package util -+ -+import ""github.com/prometheus/prometheus/model/labels"" -+ -+// PrepareLabelsAndMatchers is used by the ingester and index gateway to service series volume requests. -+// It returns a map of labels to aggregate into, a list of matchers to match streams against, -+// as well a boolean to indicate if a match all selector was provided. -+// -+// The last argument, tenantLabel, is optional. If povided, a single string of the internal tenant label namne is expected. -+func PrepareLabelsAndMatchers(targetLabels []string, matchers []*labels.Matcher, tenantLabel ...string) (map[string]struct{}, []*labels.Matcher, bool) { -+ if len(targetLabels) > 0 { -+ return prepareLabelsAndMatchersWithTargets(targetLabels, matchers, tenantLabel...) -+ } -+ -+ var includeAll bool -+ labelsToMatch := make(map[string]struct{}) -+ -+ for _, m := range matchers { -+ if m.Name == """" { -+ includeAll = true -+ continue -+ } -+ -+ if len(tenantLabel) == 1 && m.Name == tenantLabel[0] { -+ continue -+ } -+ -+ labelsToMatch[m.Name] = struct{}{} -+ } -+ -+ return labelsToMatch, matchers, includeAll -+} -+ -+func prepareLabelsAndMatchersWithTargets(targetLabels []string, matchers []*labels.Matcher, tenantLabel ...string) (map[string]struct{}, []*labels.Matcher, bool) { -+ matchAllIndex := -1 -+ labelsToMatch := make(map[string]struct{}) -+ targetsFound := make(map[string]bool, len(targetLabels)) -+ -+ for _, target := range targetLabels { -+ labelsToMatch[target] = struct{}{} -+ targetsFound[target] = false -+ } -+ -+ for i, m := range matchers { -+ if m.Name == """" { -+ matchAllIndex = i -+ continue -+ } -+ -+ if len(tenantLabel) == 1 && m.Name == tenantLabel[0] { -+ continue -+ } -+ -+ if _, ok := targetsFound[m.Name]; ok { -+ targetsFound[m.Name] = true -+ } -+ } -+ -+ // Make sure all target labels are included in the matchers. -+ for target, found := range targetsFound { -+ if !found { -+ matcher := labels.MustNewMatcher(labels.MatchRegexp, target, "".+"") -+ matchers = append(matchers, matcher) -+ } -+ } -+ -+ // If target labels has added a matcher, we can remove the all matcher -+ if matchAllIndex > -1 && len(matchers) > 1 { -+ matchers = append(matchers[:matchAllIndex], matchers[matchAllIndex+1:]...) -+ } -+ -+ return labelsToMatch, matchers, false -+}",unknown,"Add targetLabels to SeriesVolume requests (#9878) - -Adds optional `targetLabels` parameter to `series_volume` and -`series_volume_range` requests that controls how volumes are aggregated. -When provided, volumes are aggregated into the intersections of the -provided `targetLabels` only." -5ab9515020658860053590c9e38e0262be78a9b1,2023-09-12 13:19:33,Salva Corts,"Fix regression when parsing numbers in Push request (#10550) - -**What this PR does / why we need it**: -Even though the[ Loki HTTP API docs for the push endpoint][1] state that -the stream label values should be strings, we previously didn't enforce -this requirement. 
With https://github.com/grafana/loki/pull/9694, we -started enforcing this requirement, and that broke some users. - -In this PR we are reverting this type of assertion and adding a bunch of -tests to avoid the regression in the future. - - -[1]: -https://grafana.com/docs/loki/latest/reference/api/#push-log-entries-to-loki",False,"diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go -index d5b35ec6b69f6..287fd6a312129 100644 ---- a/pkg/loghttp/query.go -+++ b/pkg/loghttp/query.go -@@ -69,15 +69,8 @@ func (s *LogProtoStream) UnmarshalJSON(data []byte) error { - err := jsonparser.ObjectEach(data, func(key, val []byte, ty jsonparser.ValueType, _ int) error { - switch string(key) { - case ""stream"": -- labels := make(LabelSet) -- err := jsonparser.ObjectEach(val, func(key, val []byte, dataType jsonparser.ValueType, _ int) error { -- if dataType != jsonparser.String { -- return jsonparser.MalformedStringError -- } -- labels[string(key)] = string(val) -- return nil -- }) -- if err != nil { -+ var labels LabelSet -+ if err := labels.UnmarshalJSON(val); err != nil { - return err - } - s.Labels = labels.String() -diff --git a/pkg/util/unmarshal/unmarshal_test.go b/pkg/util/unmarshal/unmarshal_test.go -index 34e4c2dbf3503..9fdaf27512127 100644 ---- a/pkg/util/unmarshal/unmarshal_test.go -+++ b/pkg/util/unmarshal/unmarshal_test.go -@@ -7,6 +7,7 @@ import ( - ""testing"" - ""time"" - -+ ""github.com/prometheus/prometheus/model/labels"" - ""github.com/stretchr/testify/require"" - - ""github.com/grafana/loki/pkg/loghttp"" -@@ -15,24 +16,28 @@ import ( - ""github.com/grafana/loki/pkg/util/marshal"" - ) - --// covers requests to /loki/api/v1/push --var pushTests = []struct { -- expected []logproto.Stream -- actual string --}{ -- { -- []logproto.Stream{ -- { -- Entries: []logproto.Entry{ -- { -- Timestamp: time.Unix(0, 123456789012345), -- Line: ""super line"", -+func Test_DecodePushRequest(t *testing.T) { -+ // covers requests to /loki/api/v1/push -+ for _, tc := range []struct { -+ name string -+ expected []logproto.Stream -+ expectedErr bool -+ actual string -+ }{ -+ { -+ name: ""basic"", -+ expected: []logproto.Stream{ -+ { -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 123456789012345), -+ Line: ""super line"", -+ }, - }, -+ Labels: labels.FromStrings(""test"", ""test"").String(), - }, -- Labels: `{test=""test""}`, - }, -- }, -- `{ -+ actual: `{ - ""streams"": [ - { - ""stream"": { -@@ -44,24 +49,25 @@ var pushTests = []struct { - } - ] - }`, -- }, -- { -- []logproto.Stream{ -- { -- Entries: []logproto.Entry{ -- { -- Timestamp: time.Unix(0, 123456789012345), -- Line: ""super line"", -- StructuredMetadata: []logproto.LabelAdapter{ -- {Name: ""a"", Value: ""1""}, -- {Name: ""b"", Value: ""2""}, -+ }, -+ { -+ name: ""with structured metadata"", -+ expected: []logproto.Stream{ -+ { -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 123456789012345), -+ Line: ""super line"", -+ StructuredMetadata: []logproto.LabelAdapter{ -+ {Name: ""a"", Value: ""1""}, -+ {Name: ""b"", Value: ""2""}, -+ }, - }, - }, -+ Labels: labels.FromStrings(""test"", ""test"").String(), - }, -- Labels: `{test=""test""}`, - }, -- }, -- `{ -+ actual: `{ - ""streams"": [ - { - ""stream"": { -@@ -73,18 +79,100 @@ var pushTests = []struct { - } - ] - }`, -- }, --} -+ }, - --func Test_DecodePushRequest(t *testing.T) { -- for i, pushTest := range pushTests { -- var actual logproto.PushRequest -- closer := io.NopCloser(strings.NewReader(pushTest.actual)) -+ // The following test cases are added to cover a 
regression. Even though the Loki HTTP API -+ // docs for the push endpoint state that the stream label values should be strings, we -+ // previously didn't enforce this requirement. -+ // With https://github.com/grafana/loki/pull/9694, we started enforcing this requirement -+ // and that broke some users. We are adding these test cases to ensure that we don't -+ // enforce this requirement in the future. Note that we may want to enforce this requirement -+ // in a future major release, in which case we should modify these test cases. -+ { -+ name: ""number in stream label value"", -+ expected: []logproto.Stream{ -+ { -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 123456789012345), -+ Line: ""super line"", -+ }, -+ }, -+ Labels: labels.FromStrings(""test"", ""test"", ""number"", ""123"").String(), -+ }, -+ }, -+ actual: `{ -+ ""streams"": [ -+ { -+ ""stream"": { -+ ""test"": ""test"", -+ ""number"": 123 -+ }, -+ ""values"":[ -+ [ ""123456789012345"", ""super line"" ] -+ ] -+ } -+ ] -+ }`, -+ }, -+ { -+ name: ""string without quotes in stream label value"", -+ expectedErr: true, -+ actual: `{ -+ ""streams"": [ -+ { -+ ""stream"": { -+ ""test"": ""test"", -+ ""text"": None -+ }, -+ ""values"":[ -+ [ ""123456789012345"", ""super line"" ] -+ ] -+ } -+ ] -+ }`, -+ }, -+ { -+ name: ""json object in stream label value"", -+ expected: []logproto.Stream{ -+ { -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 123456789012345), -+ Line: ""super line"", -+ }, -+ }, -+ Labels: labels.FromStrings(""test"", ""test"", ""text"", ""{ \""a\"": \""b\"" }"").String(), -+ }, -+ }, -+ actual: `{ -+ ""streams"": [ -+ { -+ ""stream"": { -+ ""test"": ""test"", -+ ""text"": { ""a"": ""b"" } -+ }, -+ ""values"":[ -+ [ ""123456789012345"", ""super line"" ] -+ ] -+ } -+ ] -+ }`, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ var actual logproto.PushRequest -+ closer := io.NopCloser(strings.NewReader(tc.actual)) - -- err := DecodePushRequest(closer, &actual) -- require.NoError(t, err) -+ err := DecodePushRequest(closer, &actual) -+ if tc.expectedErr { -+ require.Error(t, err) -+ return -+ } -+ require.NoError(t, err) - -- require.Equalf(t, pushTest.expected, actual.Streams, ""Push Test %d failed"", i) -+ require.Equal(t, tc.expected, actual.Streams) -+ }) - } - }",unknown,"Fix regression when parsing numbers in Push request (#10550) - -**What this PR does / why we need it**: -Even though the[ Loki HTTP API docs for the push endpoint][1] state that -the stream label values should be strings, we previously didn't enforce -this requirement. With https://github.com/grafana/loki/pull/9694, we -started enforcing this requirement, and that broke some users. - -In this PR we are reverting this type of assertion and adding a bunch of -tests to avoid the regression in the future. - - -[1]: -https://grafana.com/docs/loki/latest/reference/api/#push-log-entries-to-loki" -4e4359e67c6c760e175d2b517dc87ec76a385e42,2023-02-16 14:48:02,Vladyslav Diachenko,"Fixed XSS on LogQL Analyzer page (#8521) - -Signed-off-by: Vladyslav Diachenko ",False,"diff --git a/docs/sources/logql/analyzer.md b/docs/sources/logql/analyzer.md -index 079b27a05d8c4..0d4cbf77de10a 100644 ---- a/docs/sources/logql/analyzer.md -+++ b/docs/sources/logql/analyzer.md -@@ -2,7 +2,6 @@ - title: LogQL Analyzer - menuTitle: LoqQL Analyzer - description: The LogQL Analyzer is an inline educational tool for experimenting with writing LogQL queries. 
--draft: true - weight: 60 - --- - -diff --git a/docs/sources/logql/analyzer/script.js b/docs/sources/logql/analyzer/script.js -index eabfda4e21161..0c043ce721582 100644 ---- a/docs/sources/logql/analyzer/script.js -+++ b/docs/sources/logql/analyzer/script.js -@@ -83,7 +83,8 @@ async function handleResponse(response) { - } - - function handleError(error) { -- document.getElementById(""query-error"").innerHTML = error -+ const template = Handlebars.compile(""{{error_text}}""); -+ document.getElementById(""query-error"").innerHTML = template({error_text:error}) - document.getElementById(""query-error"").classList.remove(""hide""); - resultsElement.classList.add(""hide""); - }",unknown,"Fixed XSS on LogQL Analyzer page (#8521) - -Signed-off-by: Vladyslav Diachenko " -d97724e8dd12214092932f7e82cd46a3bb6bfd74,2022-05-04 20:20:09,Travis Patterson,"Introduce coverage to PR pipelines (#5357) - -* Add code coverage diff to CI - -* remove test package from coverage diff - -* review feedback - -* lint - -* Env variable test - -* faster experiment - -* drone vars - -* drone vars - -* how does the shell work? - -* add pr comment - -* escape json - -* report diff in comment - -* properly format json - -* quote post body - -* review feedback - -* add querier/queryrange to coverage",False,"diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet -index 32cd9bf017b93..4c8e145b764f1 100644 ---- a/.drone/drone.jsonnet -+++ b/.drone/drone.jsonnet -@@ -46,15 +46,19 @@ local github_secret = secret('github_token', 'infra/data/ci/github/grafanabot', - // Injected in a secret because this is a public repository and having the config here would leak our environment names - local deploy_configuration = secret('deploy_config', 'secret/data/common/loki_ci_autodeploy', 'config.json'); - -- --local run(name, commands) = { -+local run(name, commands, env={}) = { - name: name, - image: 'grafana/loki-build-image:%s' % build_image_version, - commands: commands, -+ environment: env, - }; - --local make(target, container=true) = run(target, [ -- 'make ' + (if !container then 'BUILD_IN_CONTAINER=false ' else '') + target, -+local make(target, container=true, args=[]) = run(target, [ -+ std.join(' ', [ -+ 'make', -+ 'BUILD_IN_CONTAINER=' + container, -+ target, -+ ] + args), - ]); - - local docker(arch, app) = { -@@ -369,7 +373,23 @@ local manifest(apps) = pipeline('manifest') { - steps: [ - make('check-drone-drift', container=false) { depends_on: ['clone'] }, - make('check-generated-files', container=false) { depends_on: ['clone'] }, -- make('test', container=false) { depends_on: ['clone', 'check-generated-files'] }, -+ make('test', container=false) { depends_on: ['clone'] }, -+ run('clone-main', commands=['cd ..', 'git clone $CI_REPO_REMOTE loki-main', 'cd -']), -+ run('test-main', commands=['cd ../loki-main', 'BUILD_IN_CONTAINER=false make test']) { depends_on: ['clone-main'] }, -+ make('compare-coverage', container=false, args=[ -+ 'old=../loki-main/test_results.txt', -+ 'new=test_results.txt', -+ 'packages=ingester,distributor,querier,querier/queryrange,iter,storage,chunkenc,logql,loki', -+ '> diff.txt', -+ ]) { depends_on: ['test', 'test-main'] }, -+ run('report-coverage', commands=[ -+ ""pull=$(echo $CI_COMMIT_REF | awk -F '/' '{print $3}')"", -+ ""body=$(jq -Rs '{body: . 
}' diff.txt)"", -+ 'curl -X POST -u $USER:$TOKEN -H ""Accept: application/vnd.github.v3+json"" https://api.github.com/repos/grafana/loki/issues/$pull/comments -d ""$body"" > /dev/null', -+ ], env={ -+ USER: 'grafanabot', -+ TOKEN: { from_secret: github_secret.name }, -+ }) { depends_on: ['compare-coverage'] }, - make('lint', container=false) { depends_on: ['clone', 'check-generated-files'] }, - make('check-mod', container=false) { depends_on: ['clone', 'test', 'lint'] }, - { -diff --git a/.drone/drone.yml b/.drone/drone.yml -index 90a2691ee8388..a9a1b250eb5fc 100644 ---- a/.drone/drone.yml -+++ b/.drone/drone.yml -@@ -40,26 +40,67 @@ steps: - - make BUILD_IN_CONTAINER=false check-drone-drift - depends_on: - - clone -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: check-drone-drift - - commands: - - make BUILD_IN_CONTAINER=false check-generated-files - depends_on: - - clone -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: check-generated-files - - commands: - - make BUILD_IN_CONTAINER=false test - depends_on: - - clone -- - check-generated-files -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: test -+- commands: -+ - cd .. -+ - git clone $CI_REPO_REMOTE loki-main -+ - cd - -+ environment: {} -+ image: grafana/loki-build-image:0.20.4 -+ name: clone-main -+- commands: -+ - cd ../loki-main -+ - BUILD_IN_CONTAINER=false make test -+ depends_on: -+ - clone-main -+ environment: {} -+ image: grafana/loki-build-image:0.20.4 -+ name: test-main -+- commands: -+ - make BUILD_IN_CONTAINER=false compare-coverage old=../loki-main/test_results.txt -+ new=test_results.txt packages=ingester,distributor,querier,querier/queryrange,iter,storage,chunkenc,logql,loki -+ > diff.txt -+ depends_on: -+ - test -+ - test-main -+ environment: {} -+ image: grafana/loki-build-image:0.20.4 -+ name: compare-coverage -+- commands: -+ - pull=$(echo $CI_COMMIT_REF | awk -F '/' '{print $3}') -+ - 'body=$(jq -Rs ''{body: . 
}'' diff.txt)' -+ - 'curl -X POST -u $USER:$TOKEN -H ""Accept: application/vnd.github.v3+json"" https://api.github.com/repos/grafana/loki/issues/$pull/comments -+ -d ""$body"" > /dev/null' -+ depends_on: -+ - compare-coverage -+ environment: -+ TOKEN: -+ from_secret: github_token -+ USER: grafanabot -+ image: grafana/loki-build-image:0.20.4 -+ name: report-coverage - - commands: - - make BUILD_IN_CONTAINER=false lint - depends_on: - - clone - - check-generated-files -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: lint - - commands: -@@ -68,6 +109,7 @@ steps: - - clone - - test - - lint -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: check-mod - - commands: -@@ -79,18 +121,21 @@ steps: - depends_on: - - clone - - check-generated-files -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: loki - - commands: - - make BUILD_IN_CONTAINER=false validate-example-configs - depends_on: - - loki -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: validate-example-configs - - commands: - - make BUILD_IN_CONTAINER=false check-example-config-doc - depends_on: - - clone -+ environment: {} - image: grafana/loki-build-image:0.20.4 - name: check-example-config-doc - trigger: -@@ -109,6 +154,7 @@ steps: - - make BUILD_IN_CONTAINER=false lint-jsonnet - depends_on: - - clone -+ environment: {} - image: grafana/jsonnet-build:c8b75df - name: lint-jsonnet - trigger: -@@ -1118,6 +1164,6 @@ kind: secret - name: deploy_config - --- - kind: signature --hmac: 96966b3eec7f8976408f2c2f2c36a29b87a694c8a32afb2fdb16209e1f9e7521 -+hmac: 4596e741ac788d461b3bbb2429c1f61efabaf943aeec6b3cd59eeff8d769de5e - - ... -diff --git a/.gitignore b/.gitignore -index a017012c7ed4b..dc3b86a0317ce 100644 ---- a/.gitignore -+++ b/.gitignore -@@ -27,6 +27,7 @@ dlv - rootfs/ - dist - coverage.txt -+test_results.txt - .DS_Store - .aws-sam - .idea -@@ -40,4 +41,4 @@ coverage.txt - *.tfvars - - # vscode --.vscode -\ No newline at end of file -+.vscode -diff --git a/Makefile b/Makefile -index 278aec0cd3629..053ea578add7f 100644 ---- a/Makefile -+++ b/Makefile -@@ -10,7 +10,7 @@ - .PHONY: validate-example-configs generate-example-config-doc check-example-config-doc - .PHONY: clean clean-protos - --SHELL = /usr/bin/env bash -+SHELL = /usr/bin/env bash -o pipefail - - GOTEST ?= go test - -@@ -260,7 +260,10 @@ lint: - ######## - - test: all -- $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./... -+ $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./... | tee test_results.txt -+ -+compare-coverage: -+ ./tools/diff_coverage.sh $(old) $(new) $(packages) - - ######### - # Clean # -diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile -index 99a7d05ac4529..9579edb2e0e67 100644 ---- a/loki-build-image/Dockerfile -+++ b/loki-build-image/Dockerfile -@@ -68,7 +68,7 @@ RUN apt-get update && \ - musl gnupg ragel \ - file zip unzip jq gettext\ - protobuf-compiler libprotobuf-dev \ -- libsystemd-dev && \ -+ libsystemd-dev jq && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - - COPY --from=docker /usr/bin/docker /usr/bin/docker -diff --git a/tools/diff_coverage.sh b/tools/diff_coverage.sh -new file mode 100755 -index 0000000000000..188821e91248b ---- /dev/null -+++ b/tools/diff_coverage.sh -@@ -0,0 +1,18 @@ -+#!/bin/bash -+ -+if [[ ! -f ""$1"" ]] || [[ ! 
-f ""$2"" ]]; then -+ echo ""unable to compare test coverage: both old and new files must exist"" -+ exit 0 -+fi -+ -+echo '```diff' -+for pkg in ${3//,/ }; do -+ old=$(grep ""pkg/${pkg}\s"" ""$1"" | sed s/%// | awk '{print $5}') -+ new=$(grep ""pkg/${pkg}\s"" ""$2"" | sed s/%// | awk '{print $5}') -+ echo | awk -v pkg=""${pkg}"" -v old=""${old:-0}"" -v new=""${new:-0}"" \ -+ '{ -+ sign=new - old < 0 ? ""-"" : ""+"" -+ printf (""%s %11s\t%s\n"", sign, pkg, new - old) -+ }' -+done -+echo '```'",unknown,"Introduce coverage to PR pipelines (#5357) - -* Add code coverage diff to CI - -* remove test package from coverage diff - -* review feedback - -* lint - -* Env variable test - -* faster experiment - -* drone vars - -* drone vars - -* how does the shell work? - -* add pr comment - -* escape json - -* report diff in comment - -* properly format json - -* quote post body - -* review feedback - -* add querier/queryrange to coverage" -85f7baaeda326c1f2df228c871f28cde9a4386cc,2024-02-19 22:31:59,Owen Diehl,Blooms/integration fixes (#11979),False,"diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go -index bed0834a86b74..920bff1decc8f 100644 ---- a/pkg/bloomcompactor/batch.go -+++ b/pkg/bloomcompactor/batch.go -@@ -286,11 +286,10 @@ func (i *blockLoadingIter) loadNext() bool { - // check if there are more overlapping groups to load - if !i.overlapping.Next() { - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() -- return false -- } -+ if i.overlapping.Err() != nil { -+ i.err = i.overlapping.Err() -+ } - -- if i.overlapping.Err() != nil { -- i.err = i.overlapping.Err() - return false - } - -@@ -300,7 +299,7 @@ func (i *blockLoadingIter) loadNext() bool { - filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter) - - iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs)) -- for filtered.Next() && filtered.Err() == nil { -+ for filtered.Next() { - bq := loader.At() - if _, ok := i.loaded[bq]; !ok { - i.loaded[bq] = struct{}{} -@@ -309,8 +308,9 @@ func (i *blockLoadingIter) loadNext() bool { - iters = append(iters, iter) - } - -- if loader.Err() != nil { -- i.err = loader.Err() -+ if err := filtered.Err(); err != nil { -+ i.err = err -+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() - return false - } - -diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go -index 3bb1c815e8295..cc96cc7219e8d 100644 ---- a/pkg/bloomcompactor/bloomcompactor.go -+++ b/pkg/bloomcompactor/bloomcompactor.go -@@ -214,6 +214,7 @@ func (c *Compactor) ownsTenant(tenant string) (v1.FingerprintBounds, bool, error - - // runs a single round of compaction for all relevant tenants and tables - func (c *Compactor) runOne(ctx context.Context) error { -+ level.Info(c.logger).Log(""msg"", ""running bloom compaction"", ""workers"", c.cfg.WorkerParallelism) - var workersErr error - var wg sync.WaitGroup - ch := make(chan tenantTable) -@@ -226,7 +227,11 @@ func (c *Compactor) runOne(ctx context.Context) error { - err := c.loadWork(ctx, ch) - - wg.Wait() -- return multierror.New(workersErr, err, ctx.Err()).Err() -+ err = multierror.New(workersErr, err, ctx.Err()).Err() -+ if err != nil { -+ level.Error(c.logger).Log(""msg"", ""compaction iteration failed"", ""err"", err) -+ } -+ return err - } - - func (c *Compactor) tables(ts time.Time) *dayRangeIterator { -@@ -241,6 +246,7 @@ func (c *Compactor) tables(ts time.Time) *dayRangeIterator { - - fromDay := config.NewDayTime(model.TimeFromUnixNano(from)) - throughDay := 
config.NewDayTime(model.TimeFromUnixNano(through)) -+ level.Debug(c.logger).Log(""msg"", ""loaded tables for compaction"", ""from"", fromDay, ""through"", throughDay) - return newDayRangeIterator(fromDay, throughDay, c.schemaCfg) - } - -@@ -250,6 +256,8 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error { - for tables.Next() && tables.Err() == nil && ctx.Err() == nil { - table := tables.At() - -+ level.Debug(c.logger).Log(""msg"", ""loading work for table"", ""table"", table) -+ - tenants, err := c.tenants(ctx, table) - if err != nil { - return errors.Wrap(err, ""getting tenants"") -@@ -262,6 +270,7 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error { - if err != nil { - return errors.Wrap(err, ""checking tenant ownership"") - } -+ level.Debug(c.logger).Log(""msg"", ""enqueueing work for tenant"", ""tenant"", tenant, ""table"", table, ""ownership"", ownershipRange.String(), ""owns"", owns) - if !owns { - c.metrics.tenantsSkipped.Inc() - continue -@@ -280,12 +289,14 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error { - } - - if err := tenants.Err(); err != nil { -+ level.Error(c.logger).Log(""msg"", ""error iterating tenants"", ""err"", err) - return errors.Wrap(err, ""iterating tenants"") - } - - } - - if err := tables.Err(); err != nil { -+ level.Error(c.logger).Log(""msg"", ""error iterating tables"", ""err"", err) - return errors.Wrap(err, ""iterating tables"") - } - -@@ -330,7 +341,7 @@ func (c *Compactor) runWorkers(ctx context.Context, ch <-chan tenantTable) error - } - - func (c *Compactor) compactTenantTable(ctx context.Context, tt tenantTable) error { -- level.Info(c.logger).Log(""msg"", ""compacting"", ""org_id"", tt.tenant, ""table"", tt.table, ""ownership"", tt.ownershipRange) -+ level.Info(c.logger).Log(""msg"", ""compacting"", ""org_id"", tt.tenant, ""table"", tt.table, ""ownership"", tt.ownershipRange.String()) - return c.controller.compactTenant(ctx, tt.table, tt.tenant, tt.ownershipRange) - } - -diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go -index ef41ec2d8efbb..2a4ff6cd45242 100644 ---- a/pkg/bloomcompactor/controller.go -+++ b/pkg/bloomcompactor/controller.go -@@ -70,7 +70,7 @@ func (s *SimpleBloomController) compactTenant( - tenant string, - ownershipRange v1.FingerprintBounds, - ) error { -- logger := log.With(s.logger, ""ownership"", ownershipRange, ""org_id"", tenant, ""table"", table.Addr()) -+ logger := log.With(s.logger, ""org_id"", tenant, ""table"", table.Addr(), ""ownership"", ownershipRange.String()) - - client, err := s.bloomStore.Client(table.ModelTime()) - if err != nil { -@@ -92,6 +92,15 @@ func (s *SimpleBloomController) compactTenant( - return errors.Wrap(err, ""failed to get metas"") - } - -+ level.Debug(logger).Log(""msg"", ""found relevant metas"", ""metas"", len(metas)) -+ -+ // fetch all metas overlapping our ownership range so we can safely -+ // check which metas can be deleted even if they only partially overlap out ownership range -+ superset, err := s.fetchSuperSet(ctx, tenant, table, ownershipRange, metas, logger) -+ if err != nil { -+ return errors.Wrap(err, ""failed to fetch superset"") -+ } -+ - // build compaction plans - work, err := s.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger) - if err != nil { -@@ -104,6 +113,63 @@ func (s *SimpleBloomController) compactTenant( - return errors.Wrap(err, ""failed to build gaps"") - } - -+ // combine built and superset metas -+ // in preparation for 
removing outdated ones -+ combined := append(superset, built...) -+ -+ outdated := outdatedMetas(combined) -+ level.Debug(logger).Log(""msg"", ""found outdated metas"", ""outdated"", len(outdated)) -+ -+ var ( -+ deletedMetas int -+ deletedBlocks int -+ ) -+ defer func() { -+ s.metrics.metasDeleted.Add(float64(deletedMetas)) -+ s.metrics.blocksDeleted.Add(float64(deletedBlocks)) -+ }() -+ -+ for _, meta := range outdated { -+ for _, block := range meta.Blocks { -+ err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}) -+ if err != nil { -+ if client.IsObjectNotFoundErr(err) { -+ level.Debug(logger).Log(""msg"", ""block not found while attempting delete, continuing"", ""block"", block.String()) -+ } else { -+ level.Error(logger).Log(""msg"", ""failed to delete block"", ""err"", err, ""block"", block.String()) -+ return errors.Wrap(err, ""failed to delete block"") -+ } -+ } -+ deletedBlocks++ -+ level.Debug(logger).Log(""msg"", ""removed outdated block"", ""block"", block.String()) -+ } -+ -+ err = client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef}) -+ if err != nil { -+ if client.IsObjectNotFoundErr(err) { -+ level.Debug(logger).Log(""msg"", ""meta not found while attempting delete, continuing"", ""meta"", meta.MetaRef.String()) -+ } else { -+ level.Error(logger).Log(""msg"", ""failed to delete meta"", ""err"", err, ""meta"", meta.MetaRef.String()) -+ return errors.Wrap(err, ""failed to delete meta"") -+ } -+ } -+ deletedMetas++ -+ level.Debug(logger).Log(""msg"", ""removed outdated meta"", ""meta"", meta.MetaRef.String()) -+ } -+ -+ level.Debug(logger).Log(""msg"", ""finished compaction"") -+ return nil -+} -+ -+// fetchSuperSet fetches all metas which overlap the ownership range of the first set of metas we've resolved -+func (s *SimpleBloomController) fetchSuperSet( -+ ctx context.Context, -+ tenant string, -+ table config.DayTable, -+ ownershipRange v1.FingerprintBounds, -+ metas []bloomshipper.Meta, -+ logger log.Logger, -+) ([]bloomshipper.Meta, error) { - // in order to delete outdates metas which only partially fall within the ownership range, - // we need to fetcha all metas in the entire bound range of the first set of metas we've resolved - /* -@@ -121,12 +187,28 @@ func (s *SimpleBloomController) compactTenant( - union := superset.Union(meta.Bounds) - if len(union) > 1 { - level.Error(logger).Log(""msg"", ""meta bounds union is not a single range"", ""union"", union) -- return errors.New(""meta bounds union is not a single range"") -+ return nil, errors.New(""meta bounds union is not a single range"") - } - superset = union[0] - } - -- metas, err = s.bloomStore.FetchMetas( -+ within := superset.Within(ownershipRange) -+ level.Debug(logger).Log( -+ ""msg"", ""looking for superset metas"", -+ ""superset"", superset.String(), -+ ""superset_within"", within, -+ ) -+ -+ if within { -+ // we don't need to fetch any more metas -+ // NB(owen-d): here we copy metas into the output. This is slightly inefficient, but -+ // helps prevent mutability bugs by returning the same slice as the input. 
-+ results := make([]bloomshipper.Meta, len(metas)) -+ copy(results, metas) -+ return results, nil -+ } -+ -+ supersetMetas, err := s.bloomStore.FetchMetas( - ctx, - bloomshipper.MetaSearchParams{ - TenantID: tenant, -@@ -134,42 +216,20 @@ func (s *SimpleBloomController) compactTenant( - Keyspace: superset, - }, - ) -+ - if err != nil { - level.Error(logger).Log(""msg"", ""failed to get meta superset range"", ""err"", err, ""superset"", superset) -- return errors.Wrap(err, ""failed to get meta supseret range"") -+ return nil, errors.Wrap(err, ""failed to get meta supseret range"") - } - -- // combine built and pre-existing metas -- // in preparation for removing outdated metas -- metas = append(metas, built...) -- -- outdated := outdatedMetas(metas) -- for _, meta := range outdated { -- for _, block := range meta.Blocks { -- if err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}); err != nil { -- if client.IsObjectNotFoundErr(err) { -- level.Debug(logger).Log(""msg"", ""block not found while attempting delete, continuing"", ""block"", block) -- continue -- } -- -- level.Error(logger).Log(""msg"", ""failed to delete blocks"", ""err"", err) -- return errors.Wrap(err, ""failed to delete blocks"") -- } -- } -- -- if err := client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef}); err != nil { -- if client.IsObjectNotFoundErr(err) { -- level.Debug(logger).Log(""msg"", ""meta not found while attempting delete, continuing"", ""meta"", meta.MetaRef) -- } else { -- level.Error(logger).Log(""msg"", ""failed to delete metas"", ""err"", err) -- return errors.Wrap(err, ""failed to delete metas"") -- } -- } -- } -- -- level.Debug(logger).Log(""msg"", ""finished compaction"") -- return nil -+ level.Debug(logger).Log( -+ ""msg"", ""found superset metas"", -+ ""metas"", len(metas), -+ ""fresh_metas"", len(supersetMetas), -+ ""delta"", len(supersetMetas)-len(metas), -+ ) - -+ return supersetMetas, nil - } - - func (s *SimpleBloomController) findOutdatedGaps( -@@ -271,6 +331,7 @@ func (s *SimpleBloomController) buildGaps( - - for i := range plan.gaps { - gap := plan.gaps[i] -+ logger := log.With(logger, ""gap"", gap.bounds.String(), ""tsdb"", plan.tsdb.Name()) - - meta := bloomshipper.Meta{ - MetaRef: bloomshipper.MetaRef{ -@@ -304,9 +365,11 @@ func (s *SimpleBloomController) buildGaps( - blocksIter, - s.rwFn, - s.metrics, -- log.With(logger, ""tsdb"", plan.tsdb.Name(), ""ownership"", gap), -+ logger, - ) - -+ level.Debug(logger).Log(""msg"", ""generating blocks"", ""overlapping_blocks"", len(gap.blocks)) -+ - newBlocks := gen.Generate(ctx) - if err != nil { - level.Error(logger).Log(""msg"", ""failed to generate bloom"", ""err"", err) -@@ -333,6 +396,16 @@ func (s *SimpleBloomController) buildGaps( - blocksIter.Close() - return nil, errors.Wrap(err, ""failed to write block"") - } -+ s.metrics.blocksCreated.Inc() -+ -+ totalGapKeyspace := (gap.bounds.Max - gap.bounds.Min) -+ progress := (built.Bounds.Max - gap.bounds.Min) -+ pct := float64(progress) / float64(totalGapKeyspace) * 100 -+ level.Debug(logger).Log( -+ ""msg"", ""uploaded block"", -+ ""block"", built.BlockRef.String(), -+ ""progress_pct"", fmt.Sprintf(""%.2f"", pct), -+ ) - - meta.Blocks = append(meta.Blocks, built.BlockRef) - } -@@ -346,6 +419,7 @@ func (s *SimpleBloomController) buildGaps( - blocksIter.Close() - - // Write the new meta -+ // TODO(owen-d): put total size in log, total time in metrics+log - ref, err := bloomshipper.MetaRefFrom(tenant, table.Addr(), gap.bounds, meta.Sources, meta.Blocks) - if err != nil { - 
level.Error(logger).Log(""msg"", ""failed to checksum meta"", ""err"", err) -@@ -357,8 +431,10 @@ func (s *SimpleBloomController) buildGaps( - level.Error(logger).Log(""msg"", ""failed to write meta"", ""err"", err) - return nil, errors.Wrap(err, ""failed to write meta"") - } -- created = append(created, meta) -+ s.metrics.metasCreated.Inc() -+ level.Debug(logger).Log(""msg"", ""uploaded meta"", ""meta"", meta.MetaRef.String()) - -+ created = append(created, meta) - totalSeries += uint64(seriesItrWithCounter.Count()) - } - } -diff --git a/pkg/bloomcompactor/metrics.go b/pkg/bloomcompactor/metrics.go -index 350e3ed7e480e..74378cb786429 100644 ---- a/pkg/bloomcompactor/metrics.go -+++ b/pkg/bloomcompactor/metrics.go -@@ -31,6 +31,11 @@ type Metrics struct { - tenantsCompleted *prometheus.CounterVec - tenantsCompletedTime *prometheus.HistogramVec - tenantsSeries prometheus.Histogram -+ -+ blocksCreated prometheus.Counter -+ blocksDeleted prometheus.Counter -+ metasCreated prometheus.Counter -+ metasDeleted prometheus.Counter - } - - func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics { -@@ -53,13 +58,13 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics { - compactionsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, -- Name: ""compactions_started"", -+ Name: ""compactions_started_total"", - Help: ""Total number of compactions started"", - }), - compactionCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, -- Name: ""compactions_completed"", -+ Name: ""compactions_completed_total"", - Help: ""Total number of compactions completed"", - }, []string{""status""}), - compactionTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ -@@ -73,7 +78,7 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics { - tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, -- Name: ""tenants_discovered"", -+ Name: ""tenants_discovered_total"", - Help: ""Number of tenants discovered during the current compaction run"", - }), - tenantsOwned: promauto.With(r).NewCounter(prometheus.CounterOpts{ -@@ -85,19 +90,19 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics { - tenantsSkipped: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, -- Name: ""tenants_skipped"", -+ Name: ""tenants_skipped_total"", - Help: ""Number of tenants skipped since they are not owned by this instance"", - }), - tenantsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, -- Name: ""tenants_started"", -+ Name: ""tenants_started_total"", - Help: ""Number of tenants started to process during the current compaction run"", - }), - tenantsCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, -- Name: ""tenants_completed"", -+ Name: ""tenants_completed_total"", - Help: ""Number of tenants successfully processed during the current compaction run"", - }, []string{""status""}), - tenantsCompletedTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ -@@ -115,6 +120,30 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics { - // Up to 10M series per tenant, way more than what we 
expect given our max_global_streams_per_user limits - Buckets: prometheus.ExponentialBucketsRange(1, 10000000, 10), - }), -+ blocksCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""blocks_created_total"", -+ Help: ""Number of blocks created"", -+ }), -+ blocksDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""blocks_deleted_total"", -+ Help: ""Number of blocks deleted"", -+ }), -+ metasCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""metas_created_total"", -+ Help: ""Number of metas created"", -+ }), -+ metasDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""metas_deleted_total"", -+ Help: ""Number of metas deleted"", -+ }), - } - - return &m -diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go -index 67d41b650e375..cb030dfb59131 100644 ---- a/pkg/bloomcompactor/spec.go -+++ b/pkg/bloomcompactor/spec.go -@@ -138,7 +138,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Blo - ) - } - -- return NewLazyBlockBuilderIterator(ctx, s.opts, s.populator(ctx), s.readWriterFn, series, s.blocksIter) -+ return NewLazyBlockBuilderIterator(ctx, s.opts, s.metrics, s.populator(ctx), s.readWriterFn, series, s.blocksIter) - } - - // LazyBlockBuilderIterator is a lazy iterator over blocks that builds -@@ -146,6 +146,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Blo - type LazyBlockBuilderIterator struct { - ctx context.Context - opts v1.BlockOptions -+ metrics *Metrics - populate func(*v1.Series, *v1.Bloom) error - readWriterFn func() (v1.BlockWriter, v1.BlockReader) - series v1.PeekingIterator[*v1.Series] -@@ -158,6 +159,7 @@ type LazyBlockBuilderIterator struct { - func NewLazyBlockBuilderIterator( - ctx context.Context, - opts v1.BlockOptions, -+ metrics *Metrics, - populate func(*v1.Series, *v1.Bloom) error, - readWriterFn func() (v1.BlockWriter, v1.BlockReader), - series v1.PeekingIterator[*v1.Series], -@@ -166,6 +168,7 @@ func NewLazyBlockBuilderIterator( - return &LazyBlockBuilderIterator{ - ctx: ctx, - opts: opts, -+ metrics: metrics, - populate: populate, - readWriterFn: readWriterFn, - series: series, -@@ -189,7 +192,7 @@ func (b *LazyBlockBuilderIterator) Next() bool { - return false - } - -- mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate) -+ mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate, b.metrics.bloomMetrics) - writer, reader := b.readWriterFn() - blockBuilder, err := v1.NewBlockBuilder(b.opts, writer) - if err != nil { -diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go -index 6159ce02a804a..7f5ec5eab81a3 100644 ---- a/pkg/bloomcompactor/tsdb.go -+++ b/pkg/bloomcompactor/tsdb.go -@@ -236,8 +236,7 @@ func NewTSDBStores( - if err != nil { - return nil, errors.Wrap(err, ""failed to create object client"") - } -- prefix := path.Join(cfg.IndexTables.PathPrefix, cfg.IndexTables.Prefix) -- res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, prefix)) -+ res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, cfg.IndexTables.PathPrefix)) - } - } - -diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go -index 6bc43cf794342..e9776dfef78f5 100644 ---- a/pkg/bloomgateway/util_test.go 
-+++ b/pkg/bloomgateway/util_test.go -@@ -323,8 +323,7 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, - MetaRef: bloomshipper.MetaRef{ - Ref: ref, - }, -- BlockTombstones: []bloomshipper.BlockRef{}, -- Blocks: []bloomshipper.BlockRef{blockRef}, -+ Blocks: []bloomshipper.BlockRef{blockRef}, - } - block, data, _ := v1.MakeBlock(t, n, fromFp, throughFp, from, through) - // Printing fingerprints and the log lines of its chunks comes handy for debugging... -diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go -index d2d51b557e5d3..b094b847f2ef5 100644 ---- a/pkg/storage/bloom/v1/builder.go -+++ b/pkg/storage/bloom/v1/builder.go -@@ -526,6 +526,7 @@ type MergeBuilder struct { - store Iterator[*Series] - // Add chunks to a bloom - populate func(*Series, *Bloom) error -+ metrics *Metrics - } - - // NewMergeBuilder is a specific builder which does the following: -@@ -536,11 +537,13 @@ func NewMergeBuilder( - blocks Iterator[*SeriesWithBloom], - store Iterator[*Series], - populate func(*Series, *Bloom) error, -+ metrics *Metrics, - ) *MergeBuilder { - return &MergeBuilder{ - blocks: blocks, - store: store, - populate: populate, -+ metrics: metrics, - } - } - -@@ -568,6 +571,8 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) { - nextInBlocks = deduped.At() - } - -+ var chunksIndexed, chunksCopied int -+ - cur := nextInBlocks - chunksToAdd := nextInStore.Chunks - // The next series from the store doesn't exist in the blocks, so we add it -@@ -583,8 +588,11 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) { - } else { - // if the series already exists in the block, we only need to add the new chunks - chunksToAdd = nextInStore.Chunks.Unless(nextInBlocks.Series.Chunks) -+ chunksCopied = len(nextInStore.Chunks) - len(chunksToAdd) - } - -+ chunksIndexed = len(chunksToAdd) -+ - if len(chunksToAdd) > 0 { - if err := mb.populate( - &Series{ -@@ -597,6 +605,9 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) { - } - } - -+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeIterated).Add(float64(chunksIndexed)) -+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeCopied).Add(float64(chunksCopied)) -+ - blockFull, err := builder.AddSeries(*cur) - if err != nil { - return 0, errors.Wrap(err, ""adding series to block"") -@@ -606,6 +617,10 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) { - } - } - -+ if err := mb.store.Err(); err != nil { -+ return 0, errors.Wrap(err, ""iterating store"") -+ } -+ - checksum, err := builder.Close() - if err != nil { - return 0, errors.Wrap(err, ""closing block"") -diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go -index 0122a35f7751c..0013ad8744579 100644 ---- a/pkg/storage/bloom/v1/builder_test.go -+++ b/pkg/storage/bloom/v1/builder_test.go -@@ -226,7 +226,7 @@ func TestMergeBuilder(t *testing.T) { - ) - - // Ensure that the merge builder combines all the blocks correctly -- mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, pop) -+ mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, pop, NewMetrics(nil)) - indexBuf := bytes.NewBuffer(nil) - bloomsBuf := bytes.NewBuffer(nil) - writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) -@@ -400,6 +400,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) { - // We're not actually indexing new data in this test - return nil - }, -+ NewMetrics(nil), - ) - builder, err := NewBlockBuilder(DefaultBlockOptions, 
writer) - require.Nil(t, err) -diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go -index e3a14dc5453ea..58d43b8cd0aca 100644 ---- a/pkg/storage/bloom/v1/index.go -+++ b/pkg/storage/bloom/v1/index.go -@@ -234,8 +234,8 @@ func aggregateHeaders(xs []SeriesHeader) SeriesHeader { - Bounds: NewBounds(fromFp, throughFP), - } - -- for _, x := range xs { -- if x.FromTs < res.FromTs { -+ for i, x := range xs { -+ if i == 0 || x.FromTs < res.FromTs { - res.FromTs = x.FromTs - } - if x.ThroughTs > res.ThroughTs { -diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go -index aa604c29f1573..f5568a9d76596 100644 ---- a/pkg/storage/bloom/v1/metrics.go -+++ b/pkg/storage/bloom/v1/metrics.go -@@ -10,12 +10,16 @@ type Metrics struct { - bloomSize prometheus.Histogram // size of the bloom filter in bytes - hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter - estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter -+ chunksIndexed *prometheus.CounterVec - } - -+const chunkIndexedTypeIterated = ""iterated"" -+const chunkIndexedTypeCopied = ""copied"" -+ - func NewMetrics(r prometheus.Registerer) *Metrics { - return &Metrics{ - sbfCreationTime: promauto.With(r).NewCounter(prometheus.CounterOpts{ -- Name: ""bloom_creation_time"", -+ Name: ""bloom_creation_time_total"", - Help: ""Time spent creating scalable bloom filters"", - }), - bloomSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ -@@ -33,5 +37,9 @@ func NewMetrics(r prometheus.Registerer) *Metrics { - Help: ""Estimated number of elements in the bloom filter"", - Buckets: prometheus.ExponentialBucketsRange(1, 33554432, 10), - }), -+ chunksIndexed: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ -+ Name: ""bloom_chunks_indexed_total"", -+ Help: ""Number of chunks indexed in bloom filters, partitioned by type. Type can be iterated or copied, where iterated indicates the chunk data was fetched and ngrams for it's contents generated whereas copied indicates the chunk already existed in another source block and was copied to the new block"", -+ }, []string{""type""}), - } - } -diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go -index 882b0eab41c24..240f2b5166588 100644 ---- a/pkg/storage/stores/shipper/bloomshipper/client.go -+++ b/pkg/storage/stores/shipper/bloomshipper/client.go -@@ -88,10 +88,6 @@ type Meta struct { - // The specific TSDB files used to generate the block. - Sources []tsdb.SingleTenantTSDBIdentifier - -- // TODO(owen-d): remove, unused -- // Old blocks which can be deleted in the future. These should be from previous compaction rounds. 
-- BlockTombstones []BlockRef -- - // A list of blocks that were generated - Blocks []BlockRef - } -diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go -index 897ed519946a7..e5bbe3b5b1bf5 100644 ---- a/pkg/storage/stores/shipper/bloomshipper/client_test.go -+++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go -@@ -63,8 +63,7 @@ func putMeta(c *BloomClient, tenant string, start model.Time, minFp, maxFp model - // EndTimestamp: start.Add(12 * time.Hour), - }, - }, -- Blocks: []BlockRef{}, -- BlockTombstones: []BlockRef{}, -+ Blocks: []BlockRef{}, - } - raw, _ := json.Marshal(meta) - return meta, c.client.PutObject(context.Background(), c.Meta(meta.MetaRef).Addr(), bytes.NewReader(raw)) -@@ -129,8 +128,7 @@ func TestBloomClient_PutMeta(t *testing.T) { - // EndTimestamp: start.Add(12 * time.Hour), - }, - }, -- Blocks: []BlockRef{}, -- BlockTombstones: []BlockRef{}, -+ Blocks: []BlockRef{}, - } - - err := c.PutMeta(ctx, meta) -diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go -index 40a695e0b8e6c..962bebb9956fd 100644 ---- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go -+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go -@@ -34,8 +34,7 @@ func makeMetas(t *testing.T, schemaCfg config.SchemaConfig, ts model.Time, keysp - EndTimestamp: ts, - }, - }, -- BlockTombstones: []BlockRef{}, -- Blocks: []BlockRef{}, -+ Blocks: []BlockRef{}, - } - } - return metas -diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver.go b/pkg/storage/stores/shipper/bloomshipper/resolver.go -index 40a59cee42dbc..7d224b9f01392 100644 ---- a/pkg/storage/stores/shipper/bloomshipper/resolver.go -+++ b/pkg/storage/stores/shipper/bloomshipper/resolver.go -@@ -14,6 +14,9 @@ const ( - BloomPrefix = ""bloom"" - MetasPrefix = ""metas"" - BlocksPrefix = ""blocks"" -+ -+ extTarGz = "".tar.gz"" -+ extJSON = "".json"" - ) - - // KeyResolver is an interface for resolving keys to locations. 
-@@ -36,7 +39,7 @@ func (defaultKeyResolver) Meta(ref MetaRef) Location { - fmt.Sprintf(""%v"", ref.TableName), - ref.TenantID, - MetasPrefix, -- fmt.Sprintf(""%v-%v"", ref.Bounds, ref.Checksum), -+ fmt.Sprintf(""%v-%x%s"", ref.Bounds, ref.Checksum, extJSON), - } - } - -@@ -50,7 +53,8 @@ func (defaultKeyResolver) ParseMetaKey(loc Location) (MetaRef, error) { - if err != nil { - return MetaRef{}, fmt.Errorf(""failed to parse bounds of meta key %s : %w"", loc, err) - } -- checksum, err := strconv.ParseUint(fnParts[2], 16, 64) -+ withoutExt := strings.TrimSuffix(fnParts[2], extJSON) -+ checksum, err := strconv.ParseUint(withoutExt, 16, 64) - if err != nil { - return MetaRef{}, fmt.Errorf(""failed to parse checksum of meta key %s : %w"", loc, err) - } -@@ -77,7 +81,7 @@ func (defaultKeyResolver) Block(ref BlockRef) Location { - ref.TenantID, - BlocksPrefix, - ref.Bounds.String(), -- fmt.Sprintf(""%d-%d-%x"", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum), -+ fmt.Sprintf(""%d-%d-%x%s"", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum, extTarGz), - } - } - -diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go -index fd755b0a204a7..3267886ac063e 100644 ---- a/pkg/storage/stores/shipper/bloomshipper/shipper.go -+++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go -@@ -55,30 +55,15 @@ func (s *Shipper) Stop() { - } - - // BlocksForMetas returns all the blocks from all the metas listed that are within the requested bounds --// and not tombstoned in any of the metas --func BlocksForMetas(metas []Meta, interval Interval, keyspaces []v1.FingerprintBounds) []BlockRef { -- blocks := make(map[BlockRef]bool) // block -> isTombstoned -- -+func BlocksForMetas(metas []Meta, interval Interval, keyspaces []v1.FingerprintBounds) (refs []BlockRef) { - for _, meta := range metas { -- for _, tombstone := range meta.BlockTombstones { -- blocks[tombstone] = true -- } - for _, block := range meta.Blocks { -- tombstoned, ok := blocks[block] -- if ok && tombstoned { -- // skip tombstoned blocks -- continue -+ if !isOutsideRange(block, interval, keyspaces) { -+ refs = append(refs, block) - } -- blocks[block] = false - } - } - -- refs := make([]BlockRef, 0, len(blocks)) -- for ref, tombstoned := range blocks { -- if !tombstoned && !isOutsideRange(ref, interval, keyspaces) { -- refs = append(refs, ref) -- } -- } - sort.Slice(refs, func(i, j int) bool { - return refs[i].Bounds.Less(refs[j].Bounds) - }) -diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go -index c9e47f91fea28..e03d72c26ba37 100644 ---- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go -+++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go -@@ -14,49 +14,6 @@ import ( - ) - - func TestBloomShipper_findBlocks(t *testing.T) { -- t.Run(""expected block that are specified in tombstones to be filtered out"", func(t *testing.T) { -- metas := []Meta{ -- { -- Blocks: []BlockRef{ -- //this blockRef is marked as deleted in the next meta -- createMatchingBlockRef(1), -- createMatchingBlockRef(2), -- }, -- }, -- { -- Blocks: []BlockRef{ -- //this blockRef is marked as deleted in the next meta -- createMatchingBlockRef(3), -- createMatchingBlockRef(4), -- }, -- }, -- { -- BlockTombstones: []BlockRef{ -- createMatchingBlockRef(1), -- createMatchingBlockRef(3), -- }, -- Blocks: []BlockRef{ -- createMatchingBlockRef(5), -- }, -- }, -- } -- -- ts := model.Now() -- -- interval := NewInterval( -- 
ts.Add(-2*time.Hour), -- ts.Add(-1*time.Hour), -- ) -- blocks := BlocksForMetas(metas, interval, []v1.FingerprintBounds{{Min: 100, Max: 200}}) -- -- expectedBlockRefs := []BlockRef{ -- createMatchingBlockRef(2), -- createMatchingBlockRef(4), -- createMatchingBlockRef(5), -- } -- require.ElementsMatch(t, expectedBlockRefs, blocks) -- }) -- - tests := map[string]struct { - minFingerprint uint64 - maxFingerprint uint64 -diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go -index ca86cb94fa963..c99aa46df4bf3 100644 ---- a/pkg/storage/stores/shipper/bloomshipper/store_test.go -+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go -@@ -83,8 +83,7 @@ func createMetaInStorage(store *BloomStore, tenant string, start model.Time, min - // EndTimestamp: start.Add(12 * time.Hour), - }, - }, -- Blocks: []BlockRef{}, -- BlockTombstones: []BlockRef{}, -+ Blocks: []BlockRef{}, - } - err := store.storeDo(start, func(s *bloomStoreEntry) error { - raw, _ := json.Marshal(meta) -diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go -index 9627718aa8ec7..00ee2e152144a 100644 ---- a/pkg/validation/limits.go -+++ b/pkg/validation/limits.go -@@ -339,7 +339,12 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&l.BloomGatewayBlocksDownloadingParallelism, ""bloom-gateway.blocks-downloading-parallelism"", 50, ""Maximum number of blocks will be downloaded in parallel by the Bloom Gateway."") - f.DurationVar(&l.BloomGatewayCacheKeyInterval, ""bloom-gateway.cache-key-interval"", 15*time.Minute, ""Interval for computing the cache key in the Bloom Gateway."") - _ = l.BloomCompactorMaxBlockSize.Set(defaultBloomCompactorMaxBlockSize) -- f.Var(&l.BloomCompactorMaxBlockSize, ""bloom-compactor.max-block-size"", ""The maximum bloom block size. A value of 0 sets an unlimited size. Default is 200MB. The actual block size might exceed this limit since blooms will be added to blocks until the block exceeds the maximum block size."") -+ f.Var(&l.BloomCompactorMaxBlockSize, ""bloom-compactor.max-block-size"", -+ fmt.Sprintf( -+ ""The maximum bloom block size. A value of 0 sets an unlimited size. Default is %s. 
The actual block size might exceed this limit since blooms will be added to blocks until the block exceeds the maximum block size."", -+ defaultBloomCompactorMaxBlockSize, -+ ), -+ ) - - l.ShardStreams = &shardstreams.Config{} - l.ShardStreams.RegisterFlagsWithPrefix(""shard-streams"", f)",unknown,Blooms/integration fixes (#11979) -8041bd29b90a79066f7c6393fef1db5ba29440b0,2024-08-13 23:26:30,renovate[bot],"fix(deps): update module github.com/azure/go-autorest/autorest/adal to v0.9.24 (#13862) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> -Co-authored-by: Paul Rogers <129207811+paul1r@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 2b5058d6ba31c..1f21f95ab16fc 100644 ---- a/go.mod -+++ b/go.mod -@@ -10,7 +10,7 @@ require ( - cloud.google.com/go/storage v1.41.0 - github.com/Azure/azure-pipeline-go v0.2.3 - github.com/Azure/azure-storage-blob-go v0.14.0 -- github.com/Azure/go-autorest/autorest/adal v0.9.23 -+ github.com/Azure/go-autorest/autorest/adal v0.9.24 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 - github.com/Masterminds/sprig/v3 v3.2.3 - github.com/NYTimes/gziphandler v1.1.1 -diff --git a/go.sum b/go.sum -index e85019be11f65..97892d0431229 100644 ---- a/go.sum -+++ b/go.sum -@@ -198,8 +198,8 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW - github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= - github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= - github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= --github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= --github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -+github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= -+github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= - github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= - github.com/Azure/go-autorest/autorest/azure/auth v0.4.1/go.mod h1:5TgH20II424SXIV9YDBsO4rBCKsh39Vbx9DvhJZZ8rU= - github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md -index b11eb07884b05..97434ea7f7709 100644 ---- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md -+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md -@@ -160,7 +160,7 @@ if (err == nil) { - ```Go - certificatePath := ""./example-app.pfx"" - --certData, err := ioutil.ReadFile(certificatePath) -+certData, err := os.ReadFile(certificatePath) - if err != nil { - return nil, fmt.Errorf(""failed to read the certificate file (%s): %v"", certificatePath, err) - } -diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go -index 9daa4b58b881e..f040e2ac6b45f 100644 ---- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go -+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go -@@ -27,7 +27,7 @@ import ( - ""context"" - ""encoding/json"" - ""fmt"" -- ""io/ioutil"" -+ ""io"" - ""net/http"" - ""net/url"" - ""strings"" 
-@@ -116,7 +116,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf - } - - s := v.Encode() -- body := ioutil.NopCloser(strings.NewReader(s)) -+ body := io.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { -@@ -131,7 +131,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf - } - defer resp.Body.Close() - -- rb, err := ioutil.ReadAll(resp.Body) -+ rb, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf(""%s %s: %s"", logPrefix, errCodeHandlingFails, err.Error()) - } -@@ -175,7 +175,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code - } - - s := v.Encode() -- body := ioutil.NopCloser(strings.NewReader(s)) -+ body := io.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { -@@ -190,7 +190,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code - } - defer resp.Body.Close() - -- rb, err := ioutil.ReadAll(resp.Body) -+ rb, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf(""%s %s: %s"", logPrefix, errTokenHandlingFails, err.Error()) - } -diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go -index 2a974a39b3cd4..fb54a43235baf 100644 ---- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go -+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go -@@ -20,7 +20,6 @@ import ( - ""encoding/json"" - ""errors"" - ""fmt"" -- ""io/ioutil"" - ""os"" - ""path/filepath"" - -@@ -62,7 +61,7 @@ func SaveToken(path string, mode os.FileMode, token Token) error { - return fmt.Errorf(""failed to create directory (%s) to store token in: %v"", dir, err) - } - -- newFile, err := ioutil.TempFile(dir, ""token"") -+ newFile, err := os.CreateTemp(dir, ""token"") - if err != nil { - return fmt.Errorf(""failed to create the temp file to write the token: %v"", err) - } -diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go -index 2a24ab80cf16c..67baecd83ffe5 100644 ---- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go -+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go -@@ -25,7 +25,6 @@ import ( - ""errors"" - ""fmt"" - ""io"" -- ""io/ioutil"" - ""math"" - ""net/http"" - ""net/url"" -@@ -1061,7 +1060,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource - } else if msiSecret.clientResourceID != """" { - data.Set(""msi_res_id"", msiSecret.clientResourceID) - } -- req.Body = ioutil.NopCloser(strings.NewReader(data.Encode())) -+ req.Body = io.NopCloser(strings.NewReader(data.Encode())) - req.Header.Set(""Content-Type"", ""application/x-www-form-urlencoded"") - break - case msiTypeIMDS: -@@ -1096,7 +1095,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource - } - - s := v.Encode() -- body := ioutil.NopCloser(strings.NewReader(s)) -+ body := io.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body -@@ -1113,7 +1112,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource - - logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) - defer resp.Body.Close() -- rb, err := 
ioutil.ReadAll(resp.Body) -+ rb, err := io.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { -@@ -1235,7 +1234,7 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http - - for attempt < maxAttempts { - if resp != nil && resp.Body != nil { -- io.Copy(ioutil.Discard, resp.Body) -+ io.Copy(io.Discard, resp.Body) - resp.Body.Close() - } - resp, err = sender.Do(req) -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 63e213615c8b5..294f67708db38 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -137,7 +137,7 @@ github.com/Azure/go-autorest - ## explicit; go 1.15 - github.com/Azure/go-autorest/autorest - github.com/Azure/go-autorest/autorest/azure --# github.com/Azure/go-autorest/autorest/adal v0.9.23 -+# github.com/Azure/go-autorest/autorest/adal v0.9.24 - ## explicit; go 1.15 - github.com/Azure/go-autorest/autorest/adal - # github.com/Azure/go-autorest/autorest/azure/auth v0.5.13",fix,"update module github.com/azure/go-autorest/autorest/adal to v0.9.24 (#13862) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> -Co-authored-by: Paul Rogers <129207811+paul1r@users.noreply.github.com>" -9ffee5148b7d369243149da236bb9befa4bcc637,2024-09-23 21:08:28,nicolevanderhoeven,"docs: Added new Query best practices page (#14057) - -Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/query/bp-query.md b/docs/sources/query/bp-query.md -new file mode 100644 -index 0000000000000..819fdc0a76b06 ---- /dev/null -+++ b/docs/sources/query/bp-query.md -@@ -0,0 +1,80 @@ -+--- -+title: Query best practices -+menuTitle: Query best practices -+description: Describes best practices for querying in Grafana Loki. -+aliases: -+- ../bp-query -+weight: 700 -+--- -+# Query best practices -+ -+The way you write queries in Loki affects how quickly you get results returned from those queries. Understanding the way Loki parses queries can help you write queries that are efficient and performant. -+ -+{{< admonition type=""tip"" >}} -+Before you start optimizing queries, read the [labels best practices](https://grafana.com/docs/loki//get-started/labels/bp-labels/) page to understand what makes a good label. Choosing the right labels is the first step towards writing efficient queries. -+{{< /admonition >}} -+ -+Loki evaluates a LogQL query from left to right, in the order that it is written. To get the best possible query performance, eliminate as many potential results as you can earlier in the query and then continue to progressively narrow your search as you continue writing the query. This page describes the recommended order for writing queries that efficiently filter out unwanted results. -+ -+## Narrow down your time range first -+ -+Reduce the number of logs Loki needs to look through by specifying a period of time that you'd like to search through. Loki creates one index file per day, so queries that span over multiple days fetches multiple index files. The fewer files Loki has to search, the faster the query results are returned. -+ -+Time ranges are typically not part of the query, but you can set a time range through your visualization tool or through [the Loki API](https://grafana.com/docs/loki//reference/loki-http-api/). -+ -+ -+### In Grafana -+ -+If you're using Loki with Grafana, you can use the dropdown menu on the upper right hand corner of a dashboard to select a time range, either relative (last X hours) or absolute (a specific date and time). 
-+ -+![Screenshot of time selector on Grafana](../grafana-time-range-picker.png ""Grafana time interval selector"") -+ -+### Through Loki API -+ -+If you're querying Loki through [the Loki API](https://grafana.com/docs/loki//reference/loki-http-api/), you can use the [`query_range` endpoint]({{https://grafana.com/docs/loki//reference/loki-http-api/#query-logs-within-a-range-of-time"" >}}) to add `start` and `end` timestamps for your query as parameters to the HTTP call rather than as part of the query itself. -+ -+```bash -+http:///loki/api/v1/query_range?query={job=""app""}&start=1633017600000000000&end=1633104000000000000 -+ -+``` -+ -+ -+## Use precise label selectors -+ -+Next, write your label selectors. Identify the most specific label you can use within the log line and search based on that first. For example, if the logs contain the labels `namespace` and `app_name` and the latter is a smaller subset of data, start your query by selecting based on `app_name`: -+ -+```bash -+{app_name=""carnivorousgreenhouse""} -+``` -+ -+Using the most specific label selector has the added benefit of reducing the length of your query. Since `app_name` is more specific than `namespace`, you don't need to add a selector for `namespace`. Adding more general label selectors has no further effect on the query. -+ -+ -+## Use simple line filter expressions over regular expressions -+ -+When using [line filter expressions](https://grafana.com/docs/loki//query/log_queries/#line-filter-expression), prefer the simpler filter operators such as: -+- `|=` (contains string) and -+- `!=` (does not contain string) -+over the regular expression filter operators: -+- `|~` (matches the regular expression) -+- `!~` (does not match the regular expression) -+ -+Loki evaluates the first two filter expressions faster than it can evaluate regular expressions, so always try to rewrite your query in terms of whether a log line contains or does not contain a certain string. Use regular expressions only as a last resort. -+ -+Line filter expressions are more efficient than parser expressions. -+ -+## Avoid using complex text parsers -+ -+Use [parser expressions](https://grafana.com/docs/loki//query/log_queries/#parser-expression) only after line filter expressions. Parser expressions are ways to look through the log line and extract labels in different formats, which can be useful but are also more intensive for Loki to do than line filter expressions. Using them after line filter expressions means that Loki only needs to evaluate parser expressions for log lines that match the line filter expression, reducing the amount of logs that Loki needs to search through. -+ -+Parser expressions include [JSON](https://grafana.com/docs/loki//query/log_queries/#json, [logfmt](https://grafana.com/docs/loki//query/log_queries/#logfmt), [pattern](https://grafana.com/docs/loki//query/log_queries/#pattern), [regexp](https://grafana.com/docs/loki//query/log_queries/#regular-expression), and [unpack](https://grafana.com/docs/loki//query/log_queries/#unpack) parsers. -+ -+## Use recording rules -+ -+Some queries are sufficiently complex, or some datasets sufficiently large, that there is a limit as to how much query performance can be optimized. If you're following the tips on this page and are still experiencing slow query times, consider creating a [recording rule](https://grafana.com/docs/loki//operations/recording-rules/) for them. 
A recording rule runs a query at a predetermined time and also precomputes the results of that query, saving those results for faster retrieval later. -+ -+## Further resources -+ -+- [Watch: 5 tips for improving Grafana Loki query performance](https://grafana.com/blog/2023/01/10/watch-5-tips-for-improving-grafana-loki-query-performance/) -+- [Grafana Loki Design Basics with Ed Welch (Grafana Office Hours #27)](https://www.youtube.com/live/3uFMJLufgSo?feature=shared&t=3385) -+- [Labels best practices](https://grafana.com/docs/loki//get-started/labels/bp-labels/) -\ No newline at end of file -diff --git a/docs/sources/query/grafana-time-range-picker.png b/docs/sources/query/grafana-time-range-picker.png -new file mode 100644 -index 0000000000000..f207e5c0ccec7 -Binary files /dev/null and b/docs/sources/query/grafana-time-range-picker.png differ",docs,"Added new Query best practices page (#14057) - -Co-authored-by: J Stickler " -6dce98870d8c5c7054b3444d2fe4e66dad262a53,2024-04-18 20:01:26,Michel Hollands,"fix: Fix the lokitool imports (#12673) - -Signed-off-by: Michel Hollands ",False,"diff --git a/cmd/lokitool/main.go b/cmd/lokitool/main.go -index 155705b07afa7..6b52fb0a3d657 100644 ---- a/cmd/lokitool/main.go -+++ b/cmd/lokitool/main.go -@@ -8,7 +8,7 @@ import ( - - ""github.com/prometheus/common/version"" - -- ""github.com/grafana/loki/pkg/tool/commands"" -+ ""github.com/grafana/loki/v3/pkg/tool/commands"" - ) - - var ( -diff --git a/pkg/tool/client/rules.go b/pkg/tool/client/rules.go -index 40dd0e1a292be..d662794d81254 100644 ---- a/pkg/tool/client/rules.go -+++ b/pkg/tool/client/rules.go -@@ -10,7 +10,7 @@ import ( - log ""github.com/sirupsen/logrus"" - ""gopkg.in/yaml.v3"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - // CreateRuleGroup creates a new rule group -diff --git a/pkg/tool/commands/rules.go b/pkg/tool/commands/rules.go -index d1e16c026b2a6..4abc14162eddd 100644 ---- a/pkg/tool/commands/rules.go -+++ b/pkg/tool/commands/rules.go -@@ -15,10 +15,10 @@ import ( - ""gopkg.in/alecthomas/kingpin.v2"" - yamlv3 ""gopkg.in/yaml.v3"" - -- ""github.com/grafana/loki/pkg/tool/client"" -- ""github.com/grafana/loki/pkg/tool/printer"" -- ""github.com/grafana/loki/pkg/tool/rules"" -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/client"" -+ ""github.com/grafana/loki/v3/pkg/tool/printer"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - const ( -diff --git a/pkg/tool/commands/rules_test.go b/pkg/tool/commands/rules_test.go -index d1878f856cf5c..fe27da35f9d37 100644 ---- a/pkg/tool/commands/rules_test.go -+++ b/pkg/tool/commands/rules_test.go -@@ -7,7 +7,7 @@ import ( - ""github.com/stretchr/testify/assert"" - ""gopkg.in/yaml.v3"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - func TestCheckDuplicates(t *testing.T) { -diff --git a/pkg/tool/printer/printer.go b/pkg/tool/printer/printer.go -index f85bd835a85de..084d483a07a45 100644 ---- a/pkg/tool/printer/printer.go -+++ b/pkg/tool/printer/printer.go -@@ -13,8 +13,8 @@ import ( - ""github.com/mitchellh/colorstring"" - ""gopkg.in/yaml.v3"" - -- ""github.com/grafana/loki/pkg/tool/rules"" -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - // Printer is used for 
printing formatted output from the cortextool -diff --git a/pkg/tool/printer/printer_test.go b/pkg/tool/printer/printer_test.go -index 5c9a84899cd35..c8650d9bd6101 100644 ---- a/pkg/tool/printer/printer_test.go -+++ b/pkg/tool/printer/printer_test.go -@@ -9,7 +9,7 @@ import ( - ""github.com/stretchr/testify/assert"" - ""github.com/stretchr/testify/require"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - func TestPrintRuleSet(t *testing.T) { -diff --git a/pkg/tool/rules/compare.go b/pkg/tool/rules/compare.go -index 728726037acbd..2d64c534e88d1 100644 ---- a/pkg/tool/rules/compare.go -+++ b/pkg/tool/rules/compare.go -@@ -10,7 +10,7 @@ import ( - ""github.com/prometheus/prometheus/model/rulefmt"" - yaml ""gopkg.in/yaml.v3"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - var ( -diff --git a/pkg/tool/rules/compare_test.go b/pkg/tool/rules/compare_test.go -index 0dfda624489b8..4df1aa2ee67af 100644 ---- a/pkg/tool/rules/compare_test.go -+++ b/pkg/tool/rules/compare_test.go -@@ -6,7 +6,7 @@ import ( - ""github.com/prometheus/prometheus/model/rulefmt"" - yaml ""gopkg.in/yaml.v3"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - func Test_rulesEqual(t *testing.T) { -diff --git a/pkg/tool/rules/parser.go b/pkg/tool/rules/parser.go -index f4017c049f6ef..aa8f833630091 100644 ---- a/pkg/tool/rules/parser.go -+++ b/pkg/tool/rules/parser.go -@@ -12,7 +12,7 @@ import ( - log ""github.com/sirupsen/logrus"" - yaml ""gopkg.in/yaml.v3"" - -- ""github.com/grafana/loki/pkg/ruler"" -+ ""github.com/grafana/loki/v3/pkg/ruler"" - ) - - const ( -diff --git a/pkg/tool/rules/parser_test.go b/pkg/tool/rules/parser_test.go -index 68f9ff6d70f80..35db097486a81 100644 ---- a/pkg/tool/rules/parser_test.go -+++ b/pkg/tool/rules/parser_test.go -@@ -6,7 +6,7 @@ import ( - - ""github.com/prometheus/prometheus/model/rulefmt"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - func TestParseFiles(t *testing.T) { -diff --git a/pkg/tool/rules/rules.go b/pkg/tool/rules/rules.go -index e2c216317c546..eccfbdabe45a4 100644 ---- a/pkg/tool/rules/rules.go -+++ b/pkg/tool/rules/rules.go -@@ -8,9 +8,9 @@ import ( - ""github.com/prometheus/prometheus/promql/parser"" - log ""github.com/sirupsen/logrus"" - -- logql ""github.com/grafana/loki/pkg/logql/syntax"" -+ logql ""github.com/grafana/loki/v3/pkg/logql/syntax"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - // RuleNamespace is used to parse a slightly modified prometheus -diff --git a/pkg/tool/rules/rules_test.go b/pkg/tool/rules/rules_test.go -index 690316db2d182..fba13040d49b8 100644 ---- a/pkg/tool/rules/rules_test.go -+++ b/pkg/tool/rules/rules_test.go -@@ -8,7 +8,7 @@ import ( - ""gopkg.in/yaml.v3"" - ""gotest.tools/assert"" - -- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt"" -+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"" - ) - - func TestAggregateBy(t *testing.T) {",fix,"Fix the lokitool imports (#12673) - -Signed-off-by: Michel Hollands " -3d2282745bf121377ade603432347e6ca23b9235,2022-05-25 02:15:14,Callum Styan,"Runtime reloadable config; ring migration setup (#6214) - -* Clean up setting of memberlist and multikv config for Loki services that -use the ring. 
Also sets the multi client runtime config function for all -services that use the ring. - -Signed-off-by: Callum Styan - -* Add a test for the multi kv setup - -Signed-off-by: Callum Styan - -* Fix lint issues. - -Signed-off-by: Callum Styan ",False,"diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index 5def8a0cbcb3c..7654a1385e714 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -135,8 +135,6 @@ func (t *Loki) initServer() (services.Service, error) { - } - - func (t *Loki) initRing() (_ services.Service, err error) { -- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.ring, err = ring.New(t.Cfg.Ingester.LifecyclerConfig.RingConfig, ""ingester"", ingester.RingKey, util_log.Logger, prometheus.WrapRegistererWithPrefix(""cortex_"", prometheus.DefaultRegisterer)) - if err != nil { - return -@@ -164,6 +162,19 @@ func (t *Loki) initRuntimeConfig() (services.Service, error) { - var err error - t.runtimeConfig, err = runtimeconfig.New(t.Cfg.RuntimeConfig, prometheus.WrapRegistererWithPrefix(""loki_"", prometheus.DefaultRegisterer), util_log.Logger) - t.TenantLimits = newtenantLimitsFromRuntimeConfig(t.runtimeConfig) -+ -+ // Update config fields using runtime config. Only if multiKV is used for given ring these returned functions will be -+ // called and register the listener. -+ // -+ // By doing the initialization here instead of per-module init function, we avoid the problem -+ // of projects based on Loki forgetting the wiring if they override module's init method (they also don't have access to private symbols). -+ t.Cfg.CompactorConfig.CompactorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -+ t.Cfg.Distributor.DistributorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -+ t.Cfg.IndexGateway.Ring.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -+ t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -+ t.Cfg.QueryScheduler.SchedulerRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -+ t.Cfg.Ruler.Ring.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -+ - return t.runtimeConfig, err - } - -@@ -194,8 +205,6 @@ func (t *Loki) initTenantConfigs() (_ services.Service, err error) { - } - - func (t *Loki) initDistributor() (services.Service, error) { -- t.Cfg.Distributor.DistributorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -- t.Cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - var err error - t.distributor, err = distributor.New(t.Cfg.Distributor, t.Cfg.IngesterClient, t.tenantConfigs, t.ring, t.overrides, prometheus.DefaultRegisterer) - if err != nil { -@@ -315,8 +324,6 @@ func (t *Loki) initQuerier() (services.Service, error) { - } - - func (t *Loki) initIngester() (_ services.Service, err error) { -- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) -- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort - - t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, 
t.overrides, t.tenantConfigs, prometheus.DefaultRegisterer) -@@ -735,7 +742,6 @@ func (t *Loki) initRuler() (_ services.Service, err error) { - } - - t.Cfg.Ruler.Ring.ListenPort = t.Cfg.Server.GRPCListenPort -- t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - - deleteStore, err := t.deleteRequestsStore() - if err != nil { -@@ -814,13 +820,20 @@ func (t *Loki) initMemberlistKV() (services.Service, error) { - dnsProvider := dns.NewProvider(util_log.Logger, dnsProviderReg, dns.GolangResolverType) - - t.MemberlistKV = memberlist.NewKVInitService(&t.Cfg.MemberlistKV, util_log.Logger, dnsProvider, reg) -+ -+ t.Cfg.CompactorConfig.CompactorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV -+ t.Cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV -+ t.Cfg.IndexGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV -+ t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV -+ t.Cfg.QueryScheduler.SchedulerRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV -+ t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV -+ - return t.MemberlistKV, nil - } - - func (t *Loki) initCompactor() (services.Service, error) { - // Set some config sections from other config sections in the config struct - t.Cfg.CompactorConfig.CompactorRing.ListenPort = t.Cfg.Server.GRPCListenPort -- t.Cfg.CompactorConfig.CompactorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - - if !config.UsingBoltdbShipper(t.Cfg.SchemaConfig.Configs) { - level.Info(util_log.Logger).Log(""msg"", ""Not using boltdb-shipper index, not starting compactor"") -@@ -853,7 +866,6 @@ func (t *Loki) initCompactor() (services.Service, error) { - } - - func (t *Loki) initIndexGateway() (services.Service, error) { -- t.Cfg.IndexGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.IndexGateway.Ring.ListenPort = t.Cfg.Server.GRPCListenPort - - indexClient, err := storage.NewIndexClient(config.BoltDBShipperType, t.Cfg.StorageConfig, t.Cfg.SchemaConfig, t.overrides, t.clientMetrics, t.indexGatewayRingManager.IndexGatewayOwnsTenant, prometheus.DefaultRegisterer) -@@ -875,7 +887,6 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) { - } - - t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeReadOnly -- t.Cfg.IndexGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.IndexGateway.Ring.ListenPort = t.Cfg.Server.GRPCListenPort - - managerMode := indexgateway.ClientMode -@@ -897,7 +908,6 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) { - func (t *Loki) initQueryScheduler() (services.Service, error) { - // Set some config sections from other config sections in the config struct - t.Cfg.QueryScheduler.SchedulerRing.ListenPort = t.Cfg.Server.GRPCListenPort -- t.Cfg.QueryScheduler.SchedulerRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - - s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.overrides, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { -diff --git a/pkg/loki/modules_test.go b/pkg/loki/modules_test.go -index b29ab27ff4ac1..c0c905cd71ab3 100644 ---- a/pkg/loki/modules_test.go -+++ b/pkg/loki/modules_test.go -@@ -1,10 +1,20 @@ - package loki - - import ( -+ ""path/filepath"" - ""testing"" - ""time"" - -+ ""github.com/grafana/dskit/flagext"" -+ ""github.com/prometheus/common/model"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/prometheus/client_golang/prometheus"" 
-+ -+ ""github.com/grafana/loki/pkg/storage"" -+ ""github.com/grafana/loki/pkg/storage/chunk/client/local"" - ""github.com/grafana/loki/pkg/storage/config"" -+ ""github.com/grafana/loki/pkg/storage/stores/shipper"" - ) - - func Test_calculateMaxLookBack(t *testing.T) { -@@ -81,3 +91,101 @@ func Test_calculateMaxLookBack(t *testing.T) { - }) - } - } -+ -+func prepareGlobalMetricsRegistry(t *testing.T) { -+ oldReg, oldGat := prometheus.DefaultRegisterer, prometheus.DefaultGatherer -+ -+ reg := prometheus.NewRegistry() -+ prometheus.DefaultRegisterer, prometheus.DefaultGatherer = reg, reg -+ -+ t.Cleanup(func() { -+ prometheus.DefaultRegisterer, prometheus.DefaultGatherer = oldReg, oldGat -+ }) -+} -+ -+func TestMultiKVSetup(t *testing.T) { -+ dir := t.TempDir() -+ -+ for target, checkFn := range map[string]func(t *testing.T, c Config){ -+ All: func(t *testing.T, c Config) { -+ require.NotNil(t, c.CompactorConfig.CompactorRing.KVStore.Multi.ConfigProvider) -+ require.NotNil(t, c.Distributor.DistributorRing.KVStore.Multi.ConfigProvider) -+ require.NotNil(t, c.IndexGateway.Ring.KVStore.Multi.ConfigProvider) -+ require.NotNil(t, c.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider) -+ require.NotNil(t, c.QueryScheduler.SchedulerRing.KVStore.Multi.ConfigProvider) -+ require.NotNil(t, c.Ruler.Ring.KVStore.Multi.ConfigProvider) -+ }, -+ -+ Compactor: func(t *testing.T, c Config) { -+ require.NotNil(t, c.CompactorConfig.CompactorRing.KVStore.Multi.ConfigProvider) -+ }, -+ -+ Distributor: func(t *testing.T, c Config) { -+ require.NotNil(t, c.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider) -+ }, -+ -+ IndexGateway: func(t *testing.T, c Config) { -+ require.NotNil(t, c.IndexGateway.Ring.KVStore.Multi.ConfigProvider) -+ }, -+ -+ Ingester: func(t *testing.T, c Config) { -+ require.NotNil(t, c.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider) -+ }, -+ -+ QueryScheduler: func(t *testing.T, c Config) { -+ require.NotNil(t, c.QueryScheduler.SchedulerRing.KVStore.Multi.ConfigProvider) -+ }, -+ -+ Ruler: func(t *testing.T, c Config) { -+ require.NotNil(t, c.Ruler.Ring.KVStore.Multi.ConfigProvider) -+ }, -+ } { -+ t.Run(target, func(t *testing.T) { -+ prepareGlobalMetricsRegistry(t) -+ -+ cfg := Config{} -+ cfg.SchemaConfig = config.SchemaConfig{ -+ Configs: []config.PeriodConfig{ -+ { -+ IndexType: config.StorageTypeInMemory, -+ ObjectType: config.StorageTypeFileSystem, -+ RowShards: 16, -+ Schema: ""v11"", -+ From: config.DayTime{ -+ Time: model.Now(), -+ }, -+ }, -+ }, -+ } -+ flagext.DefaultValues(&cfg) -+ // Set to 0 to find any free port. -+ cfg.Server.HTTPListenPort = 0 -+ cfg.Server.GRPCListenPort = 0 -+ cfg.Target = []string{target} -+ -+ // Must be set, otherwise MultiKV config provider will not be set. -+ cfg.RuntimeConfig.LoadPath = filepath.Join(dir, ""config.yaml"") -+ -+ // This would be overwritten by the default values setting. -+ cfg.StorageConfig = storage.Config{ -+ FSConfig: local.FSConfig{Directory: dir}, -+ BoltDBShipperConfig: shipper.Config{ -+ SharedStoreType: config.StorageTypeFileSystem, -+ ActiveIndexDirectory: dir, -+ CacheLocation: dir, -+ Mode: shipper.ModeWriteOnly}, -+ } -+ cfg.Ruler.Config.StoreConfig.Type = config.StorageTypeLocal -+ cfg.Ruler.Config.StoreConfig.Local.Directory = dir -+ -+ c, err := New(cfg) -+ require.NoError(t, err) -+ -+ _, err = c.ModuleManager.InitModuleServices(cfg.Target...) 
-+ require.NoError(t, err) -+ defer c.Server.Stop() -+ -+ checkFn(t, c.Cfg) -+ }) -+ } -+} -diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go -index d7752e9fd0569..2e652e4928478 100644 ---- a/pkg/storage/config/schema_config.go -+++ b/pkg/storage/config/schema_config.go -@@ -36,6 +36,7 @@ const ( - StorageTypeGCPColumnKey = ""gcp-columnkey"" - StorageTypeGCS = ""gcs"" - StorageTypeGrpc = ""grpc-store"" -+ StorageTypeLocal = ""local"" - StorageTypeS3 = ""s3"" - StorageTypeSwift = ""swift"" - // BoltDBShipperType holds the index type for using boltdb with shipper which keeps flushing them to a shared storage",unknown,"Runtime reloadable config; ring migration setup (#6214) - -* Clean up setting of memberlist and multikv config for Loki services that -use the ring. Also sets the multi client runtime config function for all -services that use the ring. - -Signed-off-by: Callum Styan - -* Add a test for the multi kv setup - -Signed-off-by: Callum Styan - -* Fix lint issues. - -Signed-off-by: Callum Styan " -6f8bfe0c79fda038819426d989bb262a492f692c,2020-09-22 19:04:46,Owen Diehl,"Ruler docs + single binary inclusion (#2637) - -* starts alerting docs - -* ruler in single binary - -* make docs interactive - -* alerting docs - -* ruler prom alerts endpoint - -* Apply suggestions from code review - -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> - -* doc fixes - -* capitalize ruler - -* removes double spaces - -* Update docs/sources/alerting/_index.md - -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> - -* Apply suggestions from code review - -Co-authored-by: Ed Welch -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> - -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> -Co-authored-by: Ed Welch ",False,"diff --git a/docs/Makefile b/docs/Makefile -index 2ad3330081875..6c01c0e765bc7 100644 ---- a/docs/Makefile -+++ b/docs/Makefile -@@ -3,9 +3,9 @@ IMAGE = grafana/docs-base:latest - .PHONY: docs - docs: - docker pull ${IMAGE} -- docker run -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 --rm $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make server' -+ docker run --rm -it -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make server' - - .PHONY: docs-test - docs-test: - docker pull ${IMAGE} -- docker run -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 --rm $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make prod' -\ No newline at end of file -+ docker run --rm -it -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make prod' -diff --git a/docs/sources/alerting/_index.md b/docs/sources/alerting/_index.md -new file mode 100644 -index 0000000000000..9f1f47eeb400f ---- /dev/null -+++ b/docs/sources/alerting/_index.md -@@ -0,0 +1,259 @@ -+--- -+title: Alerting -+weight: 700 -+--- -+ -+# Alerting -+ -+Loki includes a component called the Ruler, adapted from our upstream project, Cortex. The Ruler is responsible for continually evaluating a set of configurable queries and then alerting when certain conditions happen, e.g. a high percentage of error logs. 
-+ -+## Prometheus Compatible -+ -+When running the Ruler (which runs by default in the single binary), Loki accepts rules files and then schedules them for continual evaluation. These are _Prometheus compatible_! This means the rules file has the same structure as in [Prometheus](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/), with the exception that the rules specified are in LogQL. -+ -+Let's see what that looks like: -+ -+The syntax of a rule file is: -+ -+```yaml -+groups: -+ [ - ] -+``` -+ -+A simple example file could be: -+ -+```yaml -+groups: -+ - name: example -+ rules: -+ - alert: HighThroughputLogStreams -+ expr: sum by(container) (rate({job=~""loki-dev/.*""}[1m])) > 1000 -+ for: 2m -+``` -+ -+### `` -+ -+```yaml -+# The name of the group. Must be unique within a file. -+name: -+ -+# How often rules in the group are evaluated. -+[ interval: | default = Ruler.evaluation_interval || 1m ] -+ -+rules: -+ [ - ... ] -+``` -+ -+### `` -+ -+The syntax for alerting rules is (see the LogQL [docs](https://grafana.com/docs/loki/latest/logql/#metric-queries) for more details): -+ -+```yaml -+# The name of the alert. Must be a valid label value. -+alert: -+ -+# The LogQL expression to evaluate (must be an instant vector). Every evaluation cycle this is -+# evaluated at the current time, and all resultant time series become -+# pending/firing alerts. -+expr: -+ -+# Alerts are considered firing once they have been returned for this long. -+# Alerts which have not yet fired for long enough are considered pending. -+[ for: | default = 0s ] -+ -+# Labels to add or overwrite for each alert. -+labels: -+ [ : ] -+ -+# Annotations to add to each alert. -+annotations: -+ [ : ] -+``` -+ -+### Example -+ -+A full-fledged example of a rules file might look like: -+ -+```yaml -+groups: -+ - name: should_fire -+ rules: -+ - alert: HighPercentageError -+ expr: | -+ sum(rate({app=""foo"", env=""production""} |= ""error"" [5m])) by (job) -+ / -+ sum(rate({app=""foo"", env=""production""}[5m])) by (job) -+ > 0.05 -+ for: 10m -+ labels: -+ severity: page -+ annotations: -+ summary: High request latency -+ - name: credentials_leak -+ rules: -+ - alert: http-credentials-leaked -+ annotations: -+ message: ""{{ $labels.job }} is leaking http basic auth credentials."" -+ expr: 'sum by (cluster, job, pod) (count_over_time({namespace=""prod""} |~ ""http(s?)://(\\w+):(\\w+)@"" [5m]) > 0)' -+ for: 10m -+ labels: -+ severity: critical -+``` -+ -+## Use cases -+ -+The Ruler's Prometheus compatibility further accentuates the marriage between metrics and logs. For those looking to get started alerting based on logs, or wondering why this might be useful, here are a few use cases we think fit very well. -+ -+### We aren't using metrics yet -+ -+Many nascent projects, apps, or even companies may not have a metrics backend yet. We tend to add logging support before metric support, so if you're in this stage, alerting based on logs can help bridge the gap. It's easy to start building Loki alerts for things like _the percentage of error logs_ such as the example from earlier: -+```yaml -+- alert: HighPercentageError -+ expr: | -+ sum(rate({app=""foo"", env=""production""} |= ""error"" [5m])) by (job) -+ / -+ sum(rate({app=""foo"", env=""production""}[5m])) by (job) -+ > 0.05 -+``` -+ -+### Black box monitoring -+ -+We don't always control the source code of applications we run. 
Think load balancers and the myriad components (both open source and closed third-party) that support our applications; it's a common problem that these don't expose a metric you want (or any metrics at all). How then, can we bring them into our observability stack in order to monitor them effectively? Alerting based on logs is a great answer for these problems. -+ -+For a sneak peek of how to combine this with the upcoming LogQL v2 functionality, take a look at Ward Bekker's [video](https://www.youtube.com/watch?v=RwQlR3D4Km4) which builds a robust nginx monitoring dashboard entirely from nginx logs. -+ -+### Event alerting -+ -+Sometimes you want to know whether _any_ instance of something has occurred. Alerting based on logs can be a great way to handle this, such as finding examples of leaked authentication credentials: -+```yaml -+- name: credentials_leak -+ rules: -+ - alert: http-credentials-leaked -+ annotations: -+ message: ""{{ $labels.job }} is leaking http basic auth credentials."" -+ expr: 'sum by (cluster, job, pod) (count_over_time({namespace=""prod""} |~ ""http(s?)://(\\w+):(\\w+)@"" [5m]) > 0)' -+ for: 10m -+ labels: -+ severity: critical -+``` -+ -+### Alerting on high-cardinality sources -+ -+Another great use case is alerting on high cardinality sources. These are things which are difficult/expensive to record as metrics because the potential label set is huge. A great example of this is per-tenant alerting in multi-tenanted systems like Loki. It's a common balancing act between the desire to have per-tenant metrics and the cardinality explosion that ensues (adding a single _tenant_ label to an existing Prometheus metric would increase it's cardinality by the number of tenants). -+ -+Creating these alerts in LogQL is attractive because these metrics can be extracted at _query time_, meaning we don't suffer the cardinality explosion in our metrics store. -+ -+> **Note:** To really take advantage of this, we'll need some features from the upcoming LogQL v2 language. Stay tuned. -+ -+## Interacting with the Ruler -+ -+Because the rule files are identical to Prometheus rule files, we can interact with the Loki Ruler via [`cortex-tool`](https://github.com/grafana/cortex-tools#rules). The CLI is in early development, but works alongside both Loki and cortex. Make sure to pass the `--backend=loki` argument to commands when using it with Loki. -+ -+> **Note:** Not all commands in cortextool currently support Loki. -+ -+An example workflow is included below: -+ -+```sh -+# diff rules against the currently managed ruleset in Loki -+cortextool rules diff --rule-dirs=./output --backend=loki -+ -+# ensure the remote ruleset matches your local ruleset, creating/updating/deleting remote rules which differ from your local specification. -+cortextool rules sync --rule-dirs=./output --backend=loki -+ -+# print the remote ruleset -+cortextool rules print --backend=loki -+``` -+ -+There is also a [github action](https://github.com/grafana/cortex-rules-action) available for `cortex-tool`, so you can add it into your CI/CD pipelines! 
-+ -+For instance, you can sync rules on master builds via -+```yaml -+name: sync-cortex-rules-and-alerts -+on: -+ push: -+ branches: -+ - master -+env: -+ CORTEX_ADDRESS: '' -+ CORTEX_TENANT_ID: '' -+ CORTEX_API_KEY: ${{ secrets.API_KEY }} -+ RULES_DIR: 'output/' -+jobs: -+ sync-loki-alerts: -+ runs-on: ubuntu-18.04 -+ steps: -+ - name: Diff rules -+ id: diff-rules -+ uses: grafana/cortex-rules-action@v0.3.0 -+ env: -+ ACTION: 'diff' -+ with: -+ args: --backend=loki -+ - name: Sync rules -+ if: ${{ !contains(steps.diff-rules.outputs.detailed, 'no changes detected') }} -+ uses: grafana/cortex-rules-action@v0.3.0 -+ env: -+ ACTION: 'sync' -+ with: -+ args: --backend=loki -+ - name: Print rules -+ uses: grafana/cortex-rules-action@v0.3.0 -+ env: -+ ACTION: 'print' -+``` -+ -+## Scheduling and best practices -+ -+One option to scale the Ruler is by scaling it horizontally. However, with multiple Ruler instances running they will need to coordinate to determine which instance will evaluate which rule. Similar to the ingesters, the Rulers establish a hash ring to divide up the responsibilities of evaluating rules. -+ -+The possible configurations are listed fully in the configuration [docs](https://grafana.com/docs/loki/latest/configuration/), but in order to shard rules across multiple Rulers, the rules API must be enabled via flag (`-experimental.Ruler.enable-api`) or config file parameter. Secondly, the Ruler requires it's own ring be configured. From there the Rulers will shard and handle the division of rules automatically. Unlike ingesters, Rulers do not hand over responsibility: all rules are re-sharded randomly every time a Ruler is added to or removed from the ring. -+ -+A full Ruler config example is: -+ -+```yaml -+Ruler: -+ alertmanager_url: -+ enable_alertmanager_v2: true -+ enable_api: true -+ enable_sharding: true -+ ring: -+ kvstore: -+ consul: -+ host: consul.loki-dev.svc.cluster.local:8500 -+ store: consul -+ rule_path: /tmp/rules -+ storage: -+ gcs: -+ bucket_name: -+``` -+ -+## Ruler storage -+ -+The Ruler supports six kinds of storage: configdb, azure, gcs, s3, swift, and local. Most kinds of storage work with the sharded Ruler configuration in an obvious way, i.e. configure all Rulers to use the same backend. -+ -+The local implementation reads the rule files off of the local filesystem. This is a read only backend that does not support the creation and deletion of rules through [the API](https://grafana.com/docs/loki/latest/api/#Ruler). Despite the fact that it reads the local filesystem this method can still be used in a sharded Ruler configuration if the operator takes care to load the same rules to every Ruler. For instance this could be accomplished by mounting a [Kubernetes ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) onto every Ruler pod. -+ -+A typical local configuration might look something like: -+``` -+ -Ruler.storage.type=local -+ -Ruler.storage.local.directory=/tmp/loki/rules -+``` -+ -+With the above configuration, the Ruler would expect the following layout: -+``` -+/tmp/loki/rules//rules1.yaml -+ /rules2.yaml -+``` -+Yaml files are expected to be in the [Prometheus format](#Prometheus_Compatible) but include LogQL expressions as specified in the beginning of this doc. -+ -+## Future improvements -+ -+There are a few things coming to increase the robustness of this service. In no particular order: -+ -+- Recording rules. -+- Backend metric stores adapters for generated alert and recording rule data. 
The first will likely be Cortex, as Loki is built atop it. -+- Introduce LogQL v2. -+ -+## Misc Details: Metrics backends vs in-memory -+ -+Currently the Loki Ruler is decoupled from a backing Prometheus store. Generally, the result of evaluating rules as well as the history of the alert's state are stored as a time series. Loki is unable to store/retrieve these in order to allow it to run independently of i.e. Prometheus. As a workaround, Loki keeps a small in memory store whose purpose is to lazy load past evaluations when rescheduling or resharding Rulers. In the future, Loki will support optional metrics backends, allowing storage of these metrics for auditing & performance benefits. -diff --git a/docs/sources/api/_index.md b/docs/sources/api/_index.md -index 3c14e964fce90..3c083d89e82d3 100644 ---- a/docs/sources/api/_index.md -+++ b/docs/sources/api/_index.md -@@ -41,8 +41,22 @@ The HTTP API includes the following endpoints: - - [Series](#series) - - [Examples](#examples-9) - - [Statistics](#statistics) -- --## Microservices Mode -+ - [`GET /ruler/ring`](#ruler-ring-status) -+ - [`GET /loki/api/v1/rules`](#list-rule-groups) -+ - [`GET /loki/api/v1/rules/{namespace}`](#get-rule-groups-by-namespace) -+ - [`GET /loki/api/v1/rules/{namespace}/{groupName}`](#get-rule-group) -+ - [`POST /loki/api/v1/rules/{namespace}`](#set-rule-group) -+ - [`DELETE /loki/api/v1/rules/{namespace}/{groupName}`](#delete-rule-group) -+ - [`DELETE /loki/api/v1/rules/{namespace}`](#delete-namespace) -+ - [`GET /api/prom/rules`](#list-rule-groups) -+ - [`GET /api/prom/rules/{namespace}`](#get-rule-groups-by-namespace) -+ - [`GET /api/prom/rules/{namespace}/{groupName}`](#get-rule-group) -+ - [`POST /api/prom/rules/{namespace}`](#set-rule-group) -+ - [`DELETE /api/prom/rules/{namespace}/{groupName}`](#delete-rule-group) -+ - [`DELETE /api/prom/rules/{namespace}`](#delete-namespace) -+ - [`GET /prometheus/api/v1/alerts`](#list-alerts) -+ -+## Microservices mode - - When deploying Loki in microservices mode, the set of endpoints exposed by each - component is different. -@@ -95,9 +109,28 @@ And these endpoints are exposed by just the ingester: - - The API endpoints starting with `/loki/` are [Prometheus API-compatible](https://prometheus.io/docs/prometheus/latest/querying/api/) and the result formats can be used interchangeably. - -+These endpoints are exposed by the ruler: -+ -+- [`GET /ruler/ring`](#ruler-ring-status) -+- [`GET /api/v1/rules`](#list-rules) -+- [`GET /api/v1/rules`](#list-rule-groups) -+- [`GET /api/v1/rules/{namespace}`](#get-rule-groups-by-namespace) -+- [`GET /api/v1/rules/{namespace}/{groupName}`](#get-rule-group) -+- [`POST /api/v1/rules/{namespace}`](#set-rule-group) -+- [`DELETE /api/v1/rules/{namespace}/{groupName}`](#delete-rule-group) -+- [`DELETE /api/v1/rules/{namespace}`](#delete-namespace) -+- [`GET /api/prom/rules`](#list-rules) -+- [`GET /api/prom/rules`](#list-rule-groups) -+- [`GET /api/prom/rules/{namespace}`](#get-rule-groups-by-namespace) -+- [`GET /api/prom/rules/{namespace}/{groupName}`](#get-rule-group) -+- [`POST /api/prom/rules/{namespace}`](#set-rule-group) -+- [`DELETE /api/prom/rules/{namespace}/{groupName}`](#delete-rule-group) -+- [`DELETE /api/prom/rules/{namespace}`](#delete-namespace) -+- [`GET /prometheus/api/v1/alerts`](#list-alerts) -+ - A [list of clients](../clients) can be found in the clients documentation. 
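Editor's note: the ruler endpoints listed above take Prometheus-style rule groups as a YAML request body. Below is a minimal, self-contained Go sketch of setting a rule group via the documented `POST /loki/api/v1/rules/{namespace}` endpoint. The base URL, namespace, tenant ID, and rule content are placeholders for illustration and are not part of the original docs; only the path and the `Content-Type: application/yaml` header come from the text above.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// ruleGroup is a hypothetical rule group in the Prometheus-compatible format
// described above; the names and thresholds are illustrative only.
const ruleGroup = `name: example
interval: 1m
rules:
  - alert: HighPercentageError
    expr: |
      sum(rate({app="foo", env="production"} |= "error" [5m])) by (job)
        /
      sum(rate({app="foo", env="production"}[5m])) by (job)
        > 0.05
    for: 10m
    labels:
      severity: page
`

func main() {
	// POST the YAML definition to a placeholder namespace on a local instance.
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:3100/loki/api/v1/rules/example-namespace",
		bytes.NewBufferString(ruleGroup))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/yaml")
	// Assumption: multi-tenant auth is enabled, so a tenant header is supplied.
	req.Header.Set("X-Scope-OrgID", "fake")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // the docs above say 202 on success
}
```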
- --## Matrix, Vector, And Streams -+## Matrix, vector, and streams - - Some Loki API endpoints return a result of a matrix, a vector, or a stream: - -@@ -936,3 +969,162 @@ The example belows show all possible statistics returned with their respective d - } - } - ``` -+ -+## Ruler -+ -+The ruler API endpoints require to configure a backend object storage to store the recording rules and alerts. The ruler API uses the concept of a ""namespace"" when creating rule groups. This is a stand-in for the name of the rule file in Prometheus. Rule groups must be named uniquely within a namespace. -+ -+### Ruler ring status -+ -+``` -+GET /ruler/ring -+``` -+ -+Displays a web page with the ruler hash ring status, including the state, healthy and last heartbeat time of each ruler. -+ -+### List rule groups -+ -+``` -+GET /loki/api/v1/rules -+``` -+ -+List all rules configured for the authenticated tenant. This endpoint returns a YAML dictionary with all the rule groups for each namespace and `200` status code on success. -+ -+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._ -+ -+#### Example response -+ -+```yaml -+--- -+: -+- name: -+ interval: -+ rules: -+ - alert: -+ expr: -+ for: -+ annotations: -+ : -+ labels: -+ : -+- name: -+ interval: -+ rules: -+ - alert: -+ expr: -+ for: -+ annotations: -+ : -+ labels: -+ : -+: -+- name: -+ interval: -+ rules: -+ - alert: -+ expr: -+ for: -+ annotations: -+ : -+ labels: -+ : -+``` -+ -+### Get rule groups by namespace -+ -+``` -+GET /loki/api/v1/rules/{namespace} -+``` -+ -+Returns the rule groups defined for a given namespace. -+ -+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._ -+ -+#### Example response -+ -+```yaml -+name: -+interval: -+rules: -+ - alert: -+ expr: -+ for: -+ annotations: -+ : -+ labels: -+ : -+``` -+ -+### Get rule group -+ -+``` -+GET /loki/api/v1/rules/{namespace}/{groupName} -+``` -+ -+Returns the rule group matching the request namespace and group name. -+ -+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._ -+ -+### Set rule group -+ -+``` -+POST /loki/api/v1/rules/{namespace} -+``` -+ -+Creates or updates a rule group. This endpoint expects a request with `Content-Type: application/yaml` header and the rules **YAML** definition in the request body, and returns `202` on success. -+ -+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._ -+ -+#### Example request -+ -+Request headers: -+- `Content-Type: application/yaml` -+ -+Request body: -+ -+```yaml -+name: -+interval: -+rules: -+ - alert: -+ expr: -+ for: -+ annotations: -+ : -+ labels: -+ : -+``` -+ -+### Delete rule group -+ -+``` -+DELETE /loki/api/v1/rules/{namespace}/{groupName} -+ -+``` -+ -+Deletes a rule group by namespace and group name. This endpoints returns `202` on success. -+ -+### Delete namespace -+ -+``` -+DELETE /loki/api/v1/rules/{namespace} -+``` -+ -+Deletes all the rule groups in a namespace (including the namespace itself). This endpoint returns `202` on success. 
-+ -+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._ -+ -+_Requires [authentication](#authentication)._ -+ -+ -+### List alerts -+ -+``` -+GET /prometheus/api/v1/alerts -+``` -+ -+Prometheus-compatible rules endpoint to list all active alerts. -+ -+_For more information, please check out the Prometheus [alerts](https://prometheus.io/docs/prometheus/latest/querying/api/#alerts) documentation._ -+ -+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._ -diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md -index 1d333b5f4dbd0..796525ea38ce8 100644 ---- a/docs/sources/configuration/_index.md -+++ b/docs/sources/configuration/_index.md -@@ -18,7 +18,8 @@ Configuration examples can be found in the [Configuration Examples](examples/) d - - [querier_config](#querier_config) - - [query_frontend_config](#query_frontend_config) - - [queryrange_config](#queryrange_config) -- - [`frontend_worker_config`](#frontend_worker_config) -+ - [ruler_config](#ruler_config) -+ - [frontend_worker_config](#frontend_worker_config) - - [ingester_client_config](#ingester_client_config) - - [ingester_config](#ingester_config) - - [consul_config](#consul_config) -@@ -103,6 +104,9 @@ Supported contents and default values of `loki.yaml`: - # query-frontend. - [query_range: ] - -+# The ruler_config configures the Loki ruler. -+[ruler: ] -+ - # Configures how the distributor will connect to ingesters. Only appropriate - # when running all modules, the distributor, or the querier. - [ingester_client: ] -@@ -332,7 +336,339 @@ results_cache: - [parallelise_shardable_queries: | default = false] - ``` - --## `frontend_worker_config` -+## `ruler_config` -+ -+The `ruler_config` configures the Loki ruler. -+ -+```yaml -+# URL of alerts return path. -+# CLI flag: -ruler.external.url -+[external_url: | default = ] -+ -+ruler_client: -+ # Path to the client certificate file, which will be used for authenticating -+ # with the server. Also requires the key path to be configured. -+ # CLI flag: -ruler.client.tls-cert-path -+ [tls_cert_path: | default = """"] -+ -+ # Path to the key file for the client certificate. Also requires the client -+ # certificate to be configured. -+ # CLI flag: -ruler.client.tls-key-path -+ [tls_key_path: | default = """"] -+ -+ # Path to the CA certificates file to validate server certificate against. If -+ # not set, the host's root CA certificates are used. -+ # CLI flag: -ruler.client.tls-ca-path -+ [tls_ca_path: | default = """"] -+ -+ # Skip validating server certificate. -+ # CLI flag: -ruler.client.tls-insecure-skip-verify -+ [tls_insecure_skip_verify: | default = false] -+ -+# How frequently to evaluate rules -+# CLI flag: -ruler.evaluation-interval -+[evaluation_interval: | default = 1m] -+ -+# How frequently to poll for rule changes -+# CLI flag: -ruler.poll-interval -+[poll_interval: | default = 1m] -+ -+storage: -+ # Method to use for backend rule storage (azure, gcs, s3, swift, local) -+ # CLI flag: -ruler.storage.type -+ [type: ] -+ -+ azure: -+ # Azure Cloud environment. Supported values are: AzureGlobal, -+ # AzureChinaCloud, AzureGermanCloud, AzureUSGovernment. -+ # CLI flag: -ruler.storage.azure.environment -+ [environment: | default = ""AzureGlobal""] -+ -+ # Name of the blob container used to store chunks. 
This container must be -+ # created before running cortex. -+ # CLI flag: -ruler.storage.azure.container-name -+ [container_name: | default = ""cortex""] -+ -+ # The Microsoft Azure account name to be used -+ # CLI flag: -ruler.storage.azure.account-name -+ [account_name: | default = """"] -+ -+ # The Microsoft Azure account key to use. -+ # CLI flag: -ruler.storage.azure.account-key -+ [account_key: | default = """"] -+ -+ # Preallocated buffer size for downloads. -+ # CLI flag: -ruler.storage.azure.download-buffer-size -+ [download_buffer_size: | default = 512000] -+ -+ # Preallocated buffer size for uploads. -+ # CLI flag: -ruler.storage.azure.upload-buffer-size -+ [upload_buffer_size: | default = 256000] -+ -+ # Number of buffers used to used to upload a chunk. -+ # CLI flag: -ruler.storage.azure.download-buffer-count -+ [upload_buffer_count: | default = 1] -+ -+ # Timeout for requests made against azure blob storage. -+ # CLI flag: -ruler.storage.azure.request-timeout -+ [request_timeout: | default = 30s] -+ -+ # Number of retries for a request which times out. -+ # CLI flag: -ruler.storage.azure.max-retries -+ [max_retries: | default = 5] -+ -+ # Minimum time to wait before retrying a request. -+ # CLI flag: -ruler.storage.azure.min-retry-delay -+ [min_retry_delay: | default = 10ms] -+ -+ # Maximum time to wait before retrying a request. -+ # CLI flag: -ruler.storage.azure.max-retry-delay -+ [max_retry_delay: | default = 500ms] -+ -+ gcs: -+ # Name of GCS bucket to put chunks in. -+ # CLI flag: -ruler.storage.gcs.bucketname -+ [bucket_name: | default = """"] -+ -+ # The size of the buffer that GCS client for each PUT request. 0 to disable -+ # buffering. -+ # CLI flag: -ruler.storage.gcs.chunk-buffer-size -+ [chunk_buffer_size: | default = 0] -+ -+ # The duration after which the requests to GCS should be timed out. -+ # CLI flag: -ruler.storage.gcs.request-timeout -+ [request_timeout: | default = 0s] -+ -+ s3: -+ # S3 endpoint URL with escaped Key and Secret encoded. If only region is -+ # specified as a host, proper endpoint will be deduced. Use -+ # inmemory:/// to use a mock in-memory implementation. -+ # CLI flag: -ruler.storage.s3.url -+ [s3: | default = ] -+ -+ # Set this to `true` to force the request to use path-style addressing. -+ # CLI flag: -ruler.storage.s3.force-path-style -+ [s3forcepathstyle: | default = false] -+ -+ # Comma separated list of bucket names to evenly distribute chunks over. -+ # Overrides any buckets specified in s3.url flag -+ # CLI flag: -ruler.storage.s3.buckets -+ [bucketnames: | default = """"] -+ -+ # S3 Endpoint to connect to. -+ # CLI flag: -ruler.storage.s3.endpoint -+ [endpoint: | default = """"] -+ -+ # AWS region to use. -+ # CLI flag: -ruler.storage.s3.region -+ [region: | default = """"] -+ -+ # AWS Access Key ID -+ # CLI flag: -ruler.storage.s3.access-key-id -+ [access_key_id: | default = """"] -+ -+ # AWS Secret Access Key -+ # CLI flag: -ruler.storage.s3.secret-access-key -+ [secret_access_key: | default = """"] -+ -+ # Disable https on S3 connection. -+ # CLI flag: -ruler.storage.s3.insecure -+ [insecure: | default = false] -+ -+ # Enable AES256 AWS server-side encryption -+ # CLI flag: -ruler.storage.s3.sse-encryption -+ [sse_encryption: | default = false] -+ -+ http_config: -+ # The maximum amount of time an idle connection will be held open. 
-+ # CLI flag: -ruler.storage.s3.http.idle-conn-timeout -+ [idle_conn_timeout: | default = 1m30s] -+ -+ # If non-zero, specifies the amount of time to wait for a server's -+ # response headers after fully writing the request. -+ # CLI flag: -ruler.storage.s3.http.response-header-timeout -+ [response_header_timeout: | default = 0s] -+ -+ # Set to false to skip verifying the certificate chain and hostname. -+ # CLI flag: -ruler.storage.s3.http.insecure-skip-verify -+ [insecure_skip_verify: | default = false] -+ -+ swift: -+ # Openstack authentication URL. -+ # CLI flag: -ruler.storage.swift.auth-url -+ [auth_url: | default = """"] -+ -+ # Openstack username for the api. -+ # CLI flag: -ruler.storage.swift.username -+ [username: | default = """"] -+ -+ # Openstack user's domain name. -+ # CLI flag: -ruler.storage.swift.user-domain-name -+ [user_domain_name: | default = """"] -+ -+ # Openstack user's domain ID. -+ # CLI flag: -ruler.storage.swift.user-domain-id -+ [user_domain_id: | default = """"] -+ -+ # Openstack user ID for the API. -+ # CLI flag: -ruler.storage.swift.user-id -+ [user_id: | default = """"] -+ -+ # Openstack API key. -+ # CLI flag: -ruler.storage.swift.password -+ [password: | default = """"] -+ -+ # Openstack user's domain ID. -+ # CLI flag: -ruler.storage.swift.domain-id -+ [domain_id: | default = """"] -+ -+ # Openstack user's domain name. -+ # CLI flag: -ruler.storage.swift.domain-name -+ [domain_name: | default = """"] -+ -+ # Openstack project ID (v2,v3 auth only). -+ # CLI flag: -ruler.storage.swift.project-id -+ [project_id: | default = """"] -+ -+ # Openstack project name (v2,v3 auth only). -+ # CLI flag: -ruler.storage.swift.project-name -+ [project_name: | default = """"] -+ -+ # ID of the project's domain (v3 auth only), only needed if it differs the -+ # from user domain. -+ # CLI flag: -ruler.storage.swift.project-domain-id -+ [project_domain_id: | default = """"] -+ -+ # Name of the project's domain (v3 auth only), only needed if it differs -+ # from the user domain. -+ # CLI flag: -ruler.storage.swift.project-domain-name -+ [project_domain_name: | default = """"] -+ -+ # Openstack Region to use eg LON, ORD - default is use first region (v2,v3 -+ # auth only) -+ # CLI flag: -ruler.storage.swift.region-name -+ [region_name: | default = """"] -+ -+ # Name of the Swift container to put chunks in. -+ # CLI flag: -ruler.storage.swift.container-name -+ [container_name: | default = ""cortex""] -+ -+ local: -+ # Directory to scan for rules -+ # CLI flag: -ruler.storage.local.directory -+ [directory: | default = """"] -+ -+# File path to store temporary rule files -+# CLI flag: -ruler.rule-path -+[rule_path: | default = ""/rules""] -+ -+# Comma-separated list of Alertmanager URLs to send notifications to. -+# Each Alertmanager URL is treated as a separate group in the configuration. -+# Multiple Alertmanagers in HA per group can be supported by using DNS -+# resolution via -ruler.alertmanager-discovery. -+# CLI flag: -ruler.alertmanager-url -+[alertmanager_url: | default = """"] -+ -+# Use DNS SRV records to discover Alertmanager hosts. -+# CLI flag: -ruler.alertmanager-discovery -+[enable_alertmanager_discovery: | default = false] -+ -+# How long to wait between refreshing DNS resolutions of Alertmanager hosts. -+# CLI flag: -ruler.alertmanager-refresh-interval -+[alertmanager_refresh_interval: | default = 1m] -+ -+# If enabled, then requests to Alertmanager use the v2 API. 
-+# CLI flag: -ruler.alertmanager-use-v2 -+[enable_alertmanager_v2: | default = false] -+ -+# Capacity of the queue for notifications to be sent to the Alertmanager. -+# CLI flag: -ruler.notification-queue-capacity -+[notification_queue_capacity: | default = 10000] -+ -+# HTTP timeout duration when sending notifications to the Alertmanager. -+# CLI flag: -ruler.notification-timeout -+[notification_timeout: | default = 10s] -+ -+# Max time to tolerate outage for restoring ""for"" state of alert. -+# CLI flag: -ruler.for-outage-tolerance -+[for_outage_tolerance: | default = 1h] -+ -+# Minimum duration between alert and restored ""for"" state. This is maintained -+# only for alerts with configured ""for"" time greater than the grace period. -+# CLI flag: -ruler.for-grace-period -+[for_grace_period: | default = 10m] -+ -+# Minimum amount of time to wait before resending an alert to Alertmanager. -+# CLI flag: -ruler.resend-delay -+[resend_delay: | default = 1m] -+ -+# Distribute rule evaluation using ring backend. -+# CLI flag: -ruler.enable-sharding -+[enable_sharding: | default = false] -+ -+# Time to spend searching for a pending ruler when shutting down. -+# CLI flag: -ruler.search-pending-for -+[search_pending_for: | default = 5m] -+ -+ring: -+ kvstore: -+ # Backend storage to use for the ring. Supported values are: consul, etcd, -+ # inmemory, memberlist, multi. -+ # CLI flag: -ruler.ring.store -+ [store: | default = ""consul""] -+ -+ # The prefix for the keys in the store. Should end with a /. -+ # CLI flag: -ruler.ring.prefix -+ [prefix: | default = ""rulers/""] -+ -+ # The consul_config configures the consul client. -+ # The CLI flags prefix for this block config is: ruler.ring -+ [consul: ] -+ -+ # The etcd_config configures the etcd client. -+ # The CLI flags prefix for this block config is: ruler.ring -+ [etcd: ] -+ -+ multi: -+ # Primary backend storage used by multi-client. -+ # CLI flag: -ruler.ring.multi.primary -+ [primary: | default = """"] -+ -+ # Secondary backend storage used by multi-client. -+ # CLI flag: -ruler.ring.multi.secondary -+ [secondary: | default = """"] -+ -+ # Mirror writes to secondary store. -+ # CLI flag: -ruler.ring.multi.mirror-enabled -+ [mirror_enabled: | default = false] -+ -+ # Timeout for storing value to secondary store. -+ # CLI flag: -ruler.ring.multi.mirror-timeout -+ [mirror_timeout: | default = 2s] -+ -+ # Period at which to heartbeat to the ring. -+ # CLI flag: -ruler.ring.heartbeat-period -+ [heartbeat_period: | default = 5s] -+ -+ # The heartbeat timeout after which rulers are considered unhealthy within the -+ # ring. -+ # CLI flag: -ruler.ring.heartbeat-timeout -+ [heartbeat_timeout: | default = 1m] -+ -+ # Number of tokens for each ingester. -+ # CLI flag: -ruler.ring.num-tokens -+ [num_tokens: | default = 128] -+ -+# Period with which to attempt to flush rule groups. -+# CLI flag: -ruler.flush-period -+[flush_period: | default = 1m] -+ -+# Enable the Ruler API. -+# CLI flag: -experimental.ruler.enable-api -+[enable_api: | default = false] -+``` -+ -+## frontend_worker_config - - The `frontend_worker_config` configures the worker - running within the Loki querier - picking up and executing queries enqueued by the query-frontend. 
- -diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go -index 8f5b24e28b26a..78d6b9f075879 100644 ---- a/pkg/loki/loki.go -+++ b/pkg/loki/loki.go -@@ -349,7 +349,7 @@ func (t *Loki) setupModuleManager() error { - TableManager: {Server}, - Compactor: {Server}, - IngesterQuerier: {Ring}, -- All: {Querier, Ingester, Distributor, TableManager}, -+ All: {Querier, Ingester, Distributor, TableManager, Ruler}, - } - - // Add IngesterQuerier as a dependency for store when target is either ingester or querier.",unknown,"Ruler docs + single binary inclusion (#2637) - -* starts alerting docs - -* ruler in single binary - -* make docs interactive - -* alerting docs - -* ruler prom alerts endpoint - -* Apply suggestions from code review - -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> - -* doc fixes - -* capitalize ruler - -* removes double spaces - -* Update docs/sources/alerting/_index.md - -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> - -* Apply suggestions from code review - -Co-authored-by: Ed Welch -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> - -Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com> -Co-authored-by: Ed Welch " -03d4238700bafa548c1b9d8d6a4b3707a3d8b754,2022-04-15 22:34:58,Karen Miller,"Fix SSD Docker installation (#5916) - -* Fix SSD Docker installation - -* Clarify endpoints and ports",False,"diff --git a/docs/sources/installation/simple-scalable-docker.md b/docs/sources/installation/simple-scalable-docker.md -index 60faf08bb0f9d..6a5d100fe3c83 100644 ---- a/docs/sources/installation/simple-scalable-docker.md -+++ b/docs/sources/installation/simple-scalable-docker.md -@@ -45,8 +45,12 @@ docker-compose up - - The running Docker containers use the directory's configuration files. - --Navigate to http://localhost:3100/ready to check for cluster readiness. --Navigate to http://localhost:3100/metrics to view the cluster metrics. -+Navigate to http://localhost:3101/ready to check for read container readiness. -+Navigate to http://localhost:3101/metrics to view read container metrics. -+ -+Navigate to http://localhost:3102/ready to check for write container readiness. -+Navigate to http://localhost:3102/metrics to view write container metrics. -+ - Navigate to http://localhost:3000 for the Grafana instance that has Loki configured as a datasource. - - By default, the image runs processes as user loki with UID `10001` and GID `10001`. -diff --git a/production/simple-scalable/docker-compose.yaml b/production/simple-scalable/docker-compose.yaml -index cc6caf9d36beb..c34aeebe58019 100644 ---- a/production/simple-scalable/docker-compose.yaml -+++ b/production/simple-scalable/docker-compose.yaml -@@ -9,7 +9,7 @@ services: - image: grafana/loki:2.5.0 - command: ""-config.file=/etc/loki/config.yaml -target=read"" - ports: -- - 3100 -+ - 3101:3100 - - 7946 - - 9095 - volumes: -@@ -25,7 +25,7 @@ services: - image: grafana/loki:2.5.0 - command: ""-config.file=/etc/loki/config.yaml -target=write"" - ports: -- - 3100 -+ - 3102:3100 - - 7946 - - 9095 - volumes:",unknown,"Fix SSD Docker installation (#5916) - -* Fix SSD Docker installation - -* Clarify endpoints and ports" -011692c1471984a956f86f58e446aefcfa5eee2f,2023-05-01 13:36:10,Dmitry Misharov,"/loki/api/v1/delete is routed to backend url (#9336) - -**What this PR does / why we need it**: -This PR adds a routing rule for `/loki/api/v1/delete` endpoint to -`gateway` component. 
- -**Which issue(s) this PR fixes**: -Fixes #9325 - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`",False,"diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl -index bec9c665269a6..b5a5c63a5ced1 100644 ---- a/production/helm/loki/templates/_helpers.tpl -+++ b/production/helm/loki/templates/_helpers.tpl -@@ -634,6 +634,10 @@ http { - proxy_pass {{ $backendUrl }}$request_uri; - } - -+ location ~ /loki/api/v1/delete.* { -+ proxy_pass {{ $backendUrl }}$request_uri; -+ } -+ - location ~ /distributor/.* { - proxy_pass {{ $writeUrl }}$request_uri; - }",unknown,"/loki/api/v1/delete is routed to backend url (#9336) - -**What this PR does / why we need it**: -This PR adds a routing rule for `/loki/api/v1/delete` endpoint to -`gateway` component. - -**Which issue(s) this PR fixes**: -Fixes #9325 - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`" -c3d3f2ba777a9f57a95e5b2ec67b641af9d69922,2020-06-15 23:45:16,Fredrik Enestad,docs: BoltDB typo (#2217),False,"diff --git a/docs/operations/storage/README.md b/docs/operations/storage/README.md -index effa70216166f..c3d37917bc6ac 100644 ---- a/docs/operations/storage/README.md -+++ b/docs/operations/storage/README.md -@@ -26,7 +26,7 @@ The following are supported for the index: - * [Google Bigtable](https://cloud.google.com/bigtable) - * [Apache Cassandra](https://cassandra.apache.org) - * [BoltDB](https://github.com/boltdb/bolt) (doesn't work when clustering Loki) --* [Boltb-Shipper](boltdb-shipper.md) EXPERIMENTAL index store which stores boltdb index files in the object store -+* [BoltDB Shipper](boltdb-shipper.md) EXPERIMENTAL index store which stores boltdb index files in the object store - - The following are supported for the chunks:",docs,BoltDB typo (#2217) -6f491233cae226d54d190521d2b935249d88ad05,2024-09-03 17:59:06,renovate[bot],"fix(deps): update aws-sdk-go-v2 monorepo (#13986) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod -index fb35609574b72..9679448043513 100644 ---- a/tools/lambda-promtail/go.mod -+++ b/tools/lambda-promtail/go.mod -@@ -5,8 +5,8 @@ go 1.22 - require ( - github.com/aws/aws-lambda-go v1.47.0 - github.com/aws/aws-sdk-go-v2 v1.30.4 -- github.com/aws/aws-sdk-go-v2/config v1.27.28 -- github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 -+ github.com/aws/aws-sdk-go-v2/config v1.27.31 -+ github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 - github.com/go-kit/log v0.2.1 - github.com/gogo/protobuf v1.3.2 - github.com/golang/snappy v0.0.4 -@@ -24,7 +24,7 @@ require ( - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect - github.com/armon/go-metrics v0.4.1 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect -- github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect -+ 
github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect -@@ -36,7 +36,7 @@ require ( - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect -- github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect -+ github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect - github.com/aws/smithy-go v1.20.4 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/buger/jsonparser v1.1.1 // indirect -diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum -index 2627682cc9454..17803d1c55389 100644 ---- a/tools/lambda-promtail/go.sum -+++ b/tools/lambda-promtail/go.sum -@@ -52,10 +52,10 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDag - github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= --github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= --github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= --github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= --github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= -+github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= -+github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= -+github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= -+github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -@@ -74,14 +74,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= --github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c= --github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= -+github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4= -+github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= - github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= - github.com/aws/aws-sdk-go-v2/service/sso 
v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= --github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= --github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= -+github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= -+github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= - github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= - github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= - github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=",fix,"update aws-sdk-go-v2 monorepo (#13986) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -4cd0aedc76679e4b46ae7d591404debce5f9585b,2023-04-24 16:15:22,Danny Kopping,Fix output of limited response body when remote rule evaluation fails (#9253),False,"diff --git a/pkg/ruler/evaluator_remote.go b/pkg/ruler/evaluator_remote.go -index 777022ba84351..4e26e0138733d 100644 ---- a/pkg/ruler/evaluator_remote.go -+++ b/pkg/ruler/evaluator_remote.go -@@ -253,14 +253,15 @@ func (r *RemoteEvaluator) query(ctx context.Context, orgID, query string, ts tim - - fullBody := resp.Body - // created a limited reader to avoid logging the entire response body should it be very large -- limitedBody := io.LimitReader(bytes.NewReader(fullBody), 1024) -+ limitedBody := io.LimitReader(bytes.NewReader(fullBody), 128) - - // TODO(dannyk): consider retrying if the rule has a very high interval, or the rule is very sensitive to missing samples - // i.e. 
critical alerts or recording rules producing crucial RemoteEvaluatorMetrics series - if resp.Code/100 != 2 { - r.metrics.failedEvals.WithLabelValues(""upstream_error"", orgID).Inc() - -- level.Warn(log).Log(""msg"", ""rule evaluation failed with non-2xx response"", ""response_code"", resp.Code, ""response_body"", limitedBody) -+ respBod, _ := io.ReadAll(limitedBody) -+ level.Warn(log).Log(""msg"", ""rule evaluation failed with non-2xx response"", ""response_code"", resp.Code, ""response_body"", respBod) - return nil, fmt.Errorf(""unsuccessful/unexpected response - status code %d"", resp.Code) - }",unknown,Fix output of limited response body when remote rule evaluation fails (#9253) -f0542c04e11aa714d43351ed4d86cda4f4bf40b6,2021-07-28 19:08:43,Danny Kopping,"Updating drone signature (#4072) - -Signed-off-by: Danny Kopping ",False,"diff --git a/.drone/drone.yml b/.drone/drone.yml -index 89766b9f2516c..8108b45df82fe 100644 ---- a/.drone/drone.yml -+++ b/.drone/drone.yml -@@ -973,6 +973,6 @@ get: - - --- - kind: signature --hmac: b9ca51f266b7895bd1ea53ca40721d65915472fde3dc25fb662968282bc8acd5 -+hmac: b70be41d1a7f91c11af945a34bdbdc7a4f7613cf830c13f5438dba0bf33a1ec5 - - ...",unknown,"Updating drone signature (#4072) - -Signed-off-by: Danny Kopping " -b27f7b946feefa23de028289182bf2211f1cd36a,2019-09-03 20:55:58,Robert Fratto,"Change label used to keep issues from being marked as stale to keepalive (#965) - -The label used previously, important, implied precedence rather than -just a desire to keep an issue alive.",False,"diff --git a/.github/stale.yml b/.github/stale.yml -index 6dd83d219f4e9..db13ca4fad5f7 100644 ---- a/.github/stale.yml -+++ b/.github/stale.yml -@@ -6,7 +6,7 @@ daysUntilClose: 7 - - # Labels that prevent issues from being marked as stale - exemptLabels: -- - important -+ - keepalive - - # Label to use to identify a stale issue - staleLabel: stale",unknown,"Change label used to keep issues from being marked as stale to keepalive (#965) - -The label used previously, important, implied precedence rather than -just a desire to keep an issue alive." 
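Editor's note: the evaluator change above caps how much of a failed response body is logged by wrapping it in `io.LimitReader` before `io.ReadAll`. A minimal, self-contained Go sketch of that pattern follows; the function name, limit, and sample data are illustrative, not taken from the Loki source.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// truncateForLog returns at most limit bytes of body, so very large responses
// are not written to the log in full. This mirrors the io.LimitReader +
// io.ReadAll pattern used in the evaluator change above.
func truncateForLog(body []byte, limit int64) string {
	limited := io.LimitReader(bytes.NewReader(body), limit)
	b, _ := io.ReadAll(limited) // reading from an in-memory reader cannot fail
	return string(b)
}

func main() {
	huge := bytes.Repeat([]byte("x"), 4096)
	fmt.Println(len(truncateForLog(huge, 128))) // prints 128
}
```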
-e382cfe95ddd8cb84b9d554d86799f9d14182f72,2024-12-10 03:04:01,renovate[bot],"fix(deps): update module github.com/axiomhq/hyperloglog to v0.2.1 (#15322) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 4e010d25c205b..bac9587705b5e 100644 ---- a/go.mod -+++ b/go.mod -@@ -116,7 +116,7 @@ require ( - github.com/DmitriyVTitov/size v1.5.0 - github.com/IBM/go-sdk-core/v5 v5.18.1 - github.com/IBM/ibm-cos-sdk-go v1.12.0 -- github.com/axiomhq/hyperloglog v0.2.0 -+ github.com/axiomhq/hyperloglog v0.2.1 - github.com/buger/jsonparser v1.1.1 - github.com/d4l3k/messagediff v1.2.1 - github.com/dolthub/swiss v0.2.1 -@@ -171,6 +171,7 @@ require ( - github.com/gorilla/handlers v1.5.2 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect - github.com/imdario/mergo v0.3.16 // indirect -+ github.com/kamstrup/intmap v0.5.0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/sys/userns v0.1.0 // indirect -diff --git a/go.sum b/go.sum -index 6cb4957429f32..f366e1c40596b 100644 ---- a/go.sum -+++ b/go.sum -@@ -1006,8 +1006,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1Lce - github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w= - github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= - github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= --github.com/axiomhq/hyperloglog v0.2.0 h1:u1XT3yyY1rjzlWuP6NQIrV4bRYHOaqZaovqjcBEvZJo= --github.com/axiomhq/hyperloglog v0.2.0/go.mod h1:GcgMjz9gaDKZ3G0UMS6Fq/VkZ4l7uGgcJyxA7M+omIM= -+github.com/axiomhq/hyperloglog v0.2.1 h1:z+rouIlYdpZ+DVfnQigBimhQL6OKHIL3e8+hMiud5/c= -+github.com/axiomhq/hyperloglog v0.2.1/go.mod h1:WCdOZ8PNJKNcBw3xFZ7iHlnUn1nDVHK/XToLjjmySh4= - github.com/baidubce/bce-sdk-go v0.9.205 h1:9cx93gC4FSu3W3G4NkDfFl0XMUycCpvQN+nB3doNmvg= - github.com/baidubce/bce-sdk-go v0.9.205/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= - github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= -@@ -2030,6 +2030,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d - github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= - github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= - github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -+github.com/kamstrup/intmap v0.5.0 h1:WY7OJQeG7Ujc9zpPTO6PraDGSveG9js9wCPoI2q8wJQ= -+github.com/kamstrup/intmap v0.5.0/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4= - github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= - github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= - github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -diff --git a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go -index 638b291cd23a9..24b39e43562aa 100644 ---- a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go -+++ b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go -@@ -18,7 +18,7 @@ type Sketch struct { - p uint8 - m uint32 - alpha float64 -- tmpSet set -+ tmpSet *set - sparseList *compressedList - regs []uint8 - 
} -@@ -45,7 +45,7 @@ func NewSketch(precision uint8, sparse bool) (*Sketch, error) { - alpha: alpha(float64(m)), - } - if sparse { -- s.tmpSet = set{} -+ s.tmpSet = newSet(0) - s.sparseList = newCompressedList(0) - } else { - s.regs = make([]uint8, m) -@@ -65,7 +65,7 @@ func (sk *Sketch) Clone() *Sketch { - } - - func (sk *Sketch) maybeToNormal() { -- if uint32(len(sk.tmpSet))*100 > sk.m { -+ if uint32(sk.tmpSet.Len())*100 > sk.m { - sk.mergeSparse() - if uint32(sk.sparseList.Len()) > sk.m { - sk.toNormal() -@@ -90,9 +90,7 @@ func (sk *Sketch) Merge(other *Sketch) error { - } - - func (sk *Sketch) mergeSparseSketch(other *Sketch) { -- for k := range other.tmpSet { -- sk.tmpSet.add(k) -- } -+ sk.tmpSet.Merge(other.tmpSet) - for iter := other.sparseList.Iter(); iter.HasNext(); { - sk.tmpSet.add(iter.Next()) - } -@@ -105,10 +103,10 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) { - } - - if other.sparse() { -- for k := range other.tmpSet { -+ other.tmpSet.ForEach(func(k uint32) { - i, r := decodeHash(k, other.p, pp) - sk.insert(i, r) -- } -+ }) - for iter := other.sparseList.Iter(); iter.HasNext(); { - i, r := decodeHash(iter.Next(), other.p, pp) - sk.insert(i, r) -@@ -123,7 +121,7 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) { - } - - func (sk *Sketch) toNormal() { -- if len(sk.tmpSet) > 0 { -+ if sk.tmpSet.Len() > 0 { - sk.mergeSparse() - } - -@@ -165,17 +163,17 @@ func (sk *Sketch) Estimate() uint64 { - } - - func (sk *Sketch) mergeSparse() { -- if len(sk.tmpSet) == 0 { -+ if sk.tmpSet.Len() == 0 { - return - } - -- keys := make(uint64Slice, 0, len(sk.tmpSet)) -- for k := range sk.tmpSet { -+ keys := make(uint64Slice, 0, sk.tmpSet.Len()) -+ sk.tmpSet.ForEach(func(k uint32) { - keys = append(keys, k) -- } -+ }) - sort.Sort(keys) - -- newList := newCompressedList(4*len(sk.tmpSet) + len(sk.sparseList.b)) -+ newList := newCompressedList(4*sk.tmpSet.Len() + sk.sparseList.Len()) - for iter, i := sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); { - if !iter.HasNext() { - newList.Append(keys[i]) -@@ -201,7 +199,7 @@ func (sk *Sketch) mergeSparse() { - } - - sk.sparseList = newList -- sk.tmpSet = set{} -+ sk.tmpSet = newSet(0) - } - - // MarshalBinary implements the encoding.BinaryMarshaler interface. -@@ -277,7 +275,7 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error { - sparse := data[3] == byte(1) - - // Make a newSketch Sketch if the precision doesn't match or if the Sketch was used -- if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) { -+ if sk.p != p || sk.regs != nil || sk.tmpSet.Len() > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) { - newh, err := NewSketch(p, sparse) - if err != nil { - return err -@@ -292,14 +290,14 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error { - - // Unmarshal the tmp_set. - tssz := binary.BigEndian.Uint32(data[4:8]) -- sk.tmpSet = make(map[uint32]struct{}, tssz) -+ sk.tmpSet = newSet(int(tssz)) - - // We need to unmarshal tssz values in total, and each value requires us - // to read 4 bytes. - tsLastByte := int((tssz * 4) + 8) - for i := 8; i < tsLastByte; i += 4 { - k := binary.BigEndian.Uint32(data[i : i+4]) -- sk.tmpSet[k] = struct{}{} -+ sk.tmpSet.add(k) - } - - // Unmarshal the sparse Sketch. 
-diff --git a/vendor/github.com/axiomhq/hyperloglog/sparse.go b/vendor/github.com/axiomhq/hyperloglog/sparse.go -index 8c457d3278224..0151740df9859 100644 ---- a/vendor/github.com/axiomhq/hyperloglog/sparse.go -+++ b/vendor/github.com/axiomhq/hyperloglog/sparse.go -@@ -2,6 +2,8 @@ package hyperloglog - - import ( - ""math/bits"" -+ -+ ""github.com/kamstrup/intmap"" - ) - - func getIndex(k uint32, p, pp uint8) uint32 { -@@ -34,37 +36,61 @@ func decodeHash(k uint32, p, pp uint8) (uint32, uint8) { - return getIndex(k, p, pp), r - } - --type set map[uint32]struct{} -+type set struct { -+ m *intmap.Set[uint32] -+} -+ -+func newSet(size int) *set { -+ return &set{m: intmap.NewSet[uint32](size)} -+} -+ -+func (s *set) ForEach(fn func(v uint32)) { -+ s.m.ForEach(func(v uint32) bool { -+ fn(v) -+ return true -+ }) -+} -+ -+func (s *set) Merge(other *set) { -+ other.m.ForEach(func(v uint32) bool { -+ s.m.Add(v) -+ return true -+ }) -+} -+ -+func (s *set) Len() int { -+ return s.m.Len() -+} - --func (s set) add(v uint32) bool { -- _, ok := s[v] -- if ok { -+func (s *set) add(v uint32) bool { -+ if s.m.Has(v) { - return false - } -- s[v] = struct{}{} -+ s.m.Add(v) - return true - } - --func (s set) Clone() set { -+func (s *set) Clone() *set { - if s == nil { - return nil - } - -- newS := make(map[uint32]struct{}, len(s)) -- for k, v := range s { -- newS[k] = v -- } -- return newS -+ newS := intmap.NewSet[uint32](s.m.Len()) -+ s.m.ForEach(func(v uint32) bool { -+ newS.Add(v) -+ return true -+ }) -+ return &set{m: newS} - } - --func (s set) MarshalBinary() (data []byte, err error) { -+func (s *set) MarshalBinary() (data []byte, err error) { - // 4 bytes for the size of the set, and 4 bytes for each key. - // list. -- data = make([]byte, 0, 4+(4*len(s))) -+ data = make([]byte, 0, 4+(4*s.m.Len())) - - // Length of the set. We only need 32 bits because the size of the set - // couldn't exceed that on 32 bit architectures. -- sl := len(s) -+ sl := s.m.Len() - data = append(data, []byte{ - byte(sl >> 24), - byte(sl >> 16), -@@ -73,14 +99,15 @@ func (s set) MarshalBinary() (data []byte, err error) { - }...) - - // Marshal each element in the set. -- for k := range s { -+ s.m.ForEach(func(k uint32) bool { - data = append(data, []byte{ - byte(k >> 24), - byte(k >> 16), - byte(k >> 8), - byte(k), - }...) -- } -+ return true -+ }) - - return data, nil - } -diff --git a/vendor/github.com/kamstrup/intmap/.gitignore b/vendor/github.com/kamstrup/intmap/.gitignore -new file mode 100644 -index 0000000000000..1377554ebea6f ---- /dev/null -+++ b/vendor/github.com/kamstrup/intmap/.gitignore -@@ -0,0 +1 @@ -+*.swp -diff --git a/vendor/github.com/kamstrup/intmap/LICENSE b/vendor/github.com/kamstrup/intmap/LICENSE -new file mode 100644 -index 0000000000000..1eac633b0cd30 ---- /dev/null -+++ b/vendor/github.com/kamstrup/intmap/LICENSE -@@ -0,0 +1,23 @@ -+Copyright (c) 2016, Brent Pedersen - Bioinformatics -+All rights reserved. -+ -+Redistribution and use in source and binary forms, with or without -+modification, are permitted provided that the following conditions are met: -+ -+* Redistributions of source code must retain the above copyright notice, this -+ list of conditions and the following disclaimer. -+ -+* Redistributions in binary form must reproduce the above copyright notice, -+ this list of conditions and the following disclaimer in the documentation -+ and/or other materials provided with the distribution. 
-+ -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS"" -+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -diff --git a/vendor/github.com/kamstrup/intmap/README.md b/vendor/github.com/kamstrup/intmap/README.md -new file mode 100644 -index 0000000000000..e1a1e7003aff8 ---- /dev/null -+++ b/vendor/github.com/kamstrup/intmap/README.md -@@ -0,0 +1,52 @@ -+Fast hashmap with integer keys for Golang -+ -+[![GoDoc](https://godoc.org/github.com/kamstrup/intmap?status.svg)](https://godoc.org/github.com/kamstrup/intmap) -+[![Go Report Card](https://goreportcard.com/badge/github.com/kamstrup/intmap)](https://goreportcard.com/report/github.com/kamstrup/intmap) -+ -+# intmap -+ -+ import ""github.com/kamstrup/intmap"" -+ -+Package intmap is a fast hashmap implementation for Golang, specialized for maps with integer type keys. -+The values can be of any type. -+ -+It is a full port of https://github.com/brentp/intintmap to use type parameters (aka generics). -+ -+It interleaves keys and values in the same underlying array to improve locality. -+This is also known as open addressing with linear probing. -+ -+It is up to 3X faster than the builtin map: -+``` -+name time/op -+Map64Fill-8 201ms ± 5% -+IntIntMapFill-8 207ms ±31% -+StdMapFill-8 371ms ±11% -+Map64Get10PercentHitRate-8 148µs ±40% -+IntIntMapGet10PercentHitRate-8 171µs ±50% -+StdMapGet10PercentHitRate-8 171µs ±33% -+Map64Get100PercentHitRate-8 4.50ms ± 5% -+IntIntMapGet100PercentHitRate-8 4.82ms ± 6% -+StdMapGet100PercentHitRate-8 15.5ms ±32% -+``` -+ -+## Usage -+ -+```go -+m := intmap.New[int64,int64](32768) -+m.Put(int64(1234), int64(-222)) -+m.Put(int64(123), int64(33)) -+ -+v, ok := m.Get(int64(222)) -+v, ok := m.Get(int64(333)) -+ -+m.Del(int64(222)) -+m.Del(int64(333)) -+ -+fmt.Println(m.Len()) -+ -+m.ForEach(func(k int64, v int64) { -+ fmt.Printf(""key: %d, value: %d\n"", k, v) -+}) -+ -+m.Clear() // all gone, but buffers kept -+``` -diff --git a/vendor/github.com/kamstrup/intmap/map64.go b/vendor/github.com/kamstrup/intmap/map64.go -new file mode 100644 -index 0000000000000..ec8084db9f776 ---- /dev/null -+++ b/vendor/github.com/kamstrup/intmap/map64.go -@@ -0,0 +1,442 @@ -+// Package intmap contains a fast hashmap implementation for maps with keys of any integer type -+package intmap -+ -+import ( -+ ""iter"" -+ ""math"" -+) -+ -+// IntKey is a type constraint for values that can be used as keys in Map -+type IntKey interface { -+ ~int | ~uint | ~int64 | ~uint64 | ~int32 | ~uint32 | ~int16 | ~uint16 | ~int8 | ~uint8 | ~uintptr -+} -+ -+type pair[K IntKey, V any] struct { -+ K K -+ V V -+} -+ -+const fillFactor64 = 0.7 -+ -+func phiMix64(x int) int { -+ h := x * 0x9E3779B9 -+ return h ^ (h >> 16) -+} -+ -+// Map is a hashmap where the keys are some any integer type. -+// It is valid to call methods that read a nil map, similar to a standard Go map. 
-+// Methods valid on a nil map are Has, Get, Len, and ForEach. -+type Map[K IntKey, V any] struct { -+ data []pair[K, V] // key-value pairs -+ size int -+ -+ zeroVal V // value of 'zero' key -+ hasZeroKey bool // do we have 'zero' key in the map? -+} -+ -+// New creates a new map with keys being any integer subtype. -+// The map can store up to the given capacity before reallocation and rehashing occurs. -+func New[K IntKey, V any](capacity int) *Map[K, V] { -+ return &Map[K, V]{ -+ data: make([]pair[K, V], arraySize(capacity, fillFactor64)), -+ } -+} -+ -+// Has checks if the given key exists in the map. -+// Calling this method on a nil map will return false. -+func (m *Map[K, V]) Has(key K) bool { -+ if m == nil { -+ return false -+ } -+ -+ if key == K(0) { -+ return m.hasZeroKey -+ } -+ -+ idx := m.startIndex(key) -+ p := m.data[idx] -+ -+ if p.K == K(0) { // end of chain already -+ return false -+ } -+ if p.K == key { // we check zero prior to this call -+ return true -+ } -+ -+ // hash collision, seek next hash match, bailing on first empty -+ for { -+ idx = m.nextIndex(idx) -+ p = m.data[idx] -+ if p.K == K(0) { -+ return false -+ } -+ if p.K == key { -+ return true -+ } -+ } -+} -+ -+// Get returns the value if the key is found. -+// If you just need to check for existence it is easier to use Has. -+// Calling this method on a nil map will return the zero value for V and false. -+func (m *Map[K, V]) Get(key K) (V, bool) { -+ if m == nil { -+ var zero V -+ return zero, false -+ } -+ -+ if key == K(0) { -+ if m.hasZeroKey { -+ return m.zeroVal, true -+ } -+ var zero V -+ return zero, false -+ } -+ -+ idx := m.startIndex(key) -+ p := m.data[idx] -+ -+ if p.K == K(0) { // end of chain already -+ var zero V -+ return zero, false -+ } -+ if p.K == key { // we check zero prior to this call -+ return p.V, true -+ } -+ -+ // hash collision, seek next hash match, bailing on first empty -+ for { -+ idx = m.nextIndex(idx) -+ p = m.data[idx] -+ if p.K == K(0) { -+ var zero V -+ return zero, false -+ } -+ if p.K == key { -+ return p.V, true -+ } -+ } -+} -+ -+// Put adds or updates key with value val. -+func (m *Map[K, V]) Put(key K, val V) { -+ if key == K(0) { -+ if !m.hasZeroKey { -+ m.size++ -+ } -+ m.zeroVal = val -+ m.hasZeroKey = true -+ return -+ } -+ -+ idx := m.startIndex(key) -+ p := &m.data[idx] -+ -+ if p.K == K(0) { // end of chain already -+ p.K = key -+ p.V = val -+ if m.size >= m.sizeThreshold() { -+ m.rehash() -+ } else { -+ m.size++ -+ } -+ return -+ } else if p.K == key { // overwrite existing value -+ p.V = val -+ return -+ } -+ -+ // hash collision, seek next empty or key match -+ for { -+ idx = m.nextIndex(idx) -+ p = &m.data[idx] -+ -+ if p.K == K(0) { -+ p.K = key -+ p.V = val -+ if m.size >= m.sizeThreshold() { -+ m.rehash() -+ } else { -+ m.size++ -+ } -+ return -+ } else if p.K == key { -+ p.V = val -+ return -+ } -+ } -+} -+ -+// PutIfNotExists adds the key-value pair only if the key does not already exist -+// in the map, and returns the current value associated with the key and a boolean -+// indicating whether the value was newly added or not. 
-+func (m *Map[K, V]) PutIfNotExists(key K, val V) (V, bool) { -+ if key == K(0) { -+ if m.hasZeroKey { -+ return m.zeroVal, false -+ } -+ m.zeroVal = val -+ m.hasZeroKey = true -+ m.size++ -+ return val, true -+ } -+ -+ idx := m.startIndex(key) -+ p := &m.data[idx] -+ -+ if p.K == K(0) { // end of chain already -+ p.K = key -+ p.V = val -+ m.size++ -+ if m.size >= m.sizeThreshold() { -+ m.rehash() -+ } -+ return val, true -+ } else if p.K == key { -+ return p.V, false -+ } -+ -+ // hash collision, seek next hash match, bailing on first empty -+ for { -+ idx = m.nextIndex(idx) -+ p = &m.data[idx] -+ -+ if p.K == K(0) { -+ p.K = key -+ p.V = val -+ m.size++ -+ if m.size >= m.sizeThreshold() { -+ m.rehash() -+ } -+ return val, true -+ } else if p.K == key { -+ return p.V, false -+ } -+ } -+} -+ -+// ForEach iterates through key-value pairs in the map while the function f returns true. -+// This method returns immediately if invoked on a nil map. -+// -+// The iteration order of a Map is not defined, so please avoid relying on it. -+func (m *Map[K, V]) ForEach(f func(K, V) bool) { -+ if m == nil { -+ return -+ } -+ -+ if m.hasZeroKey && !f(K(0), m.zeroVal) { -+ return -+ } -+ forEach64(m.data, f) -+} -+ -+// All returns an iterator over key-value pairs from m. -+// The iterator returns immediately if invoked on a nil map. -+// -+// The iteration order of a Map is not defined, so please avoid relying on it. -+func (m *Map[K, V]) All() iter.Seq2[K, V] { -+ return m.ForEach -+} -+ -+// Keys returns an iterator over keys in m. -+// The iterator returns immediately if invoked on a nil map. -+// -+// The iteration order of a Map is not defined, so please avoid relying on it. -+func (m *Map[K, V]) Keys() iter.Seq[K] { -+ return func(yield func(k K) bool) { -+ if m == nil { -+ return -+ } -+ -+ if m.hasZeroKey && !yield(K(0)) { -+ return -+ } -+ -+ for _, p := range m.data { -+ if p.K != K(0) && !yield(p.K) { -+ return -+ } -+ } -+ } -+} -+ -+// Values returns an iterator over values in m. -+// The iterator returns immediately if invoked on a nil map. -+// -+// The iteration order of a Map is not defined, so please avoid relying on it. -+func (m *Map[K, V]) Values() iter.Seq[V] { -+ return func(yield func(v V) bool) { -+ if m == nil { -+ return -+ } -+ -+ if m.hasZeroKey && !yield(m.zeroVal) { -+ return -+ } -+ -+ for _, p := range m.data { -+ if p.K != K(0) && !yield(p.V) { -+ return -+ } -+ } -+ } -+} -+ -+// Clear removes all items from the map, but keeps the internal buffers for reuse. -+func (m *Map[K, V]) Clear() { -+ var zero V -+ m.hasZeroKey = false -+ m.zeroVal = zero -+ -+ // compiles down to runtime.memclr() -+ for i := range m.data { -+ m.data[i] = pair[K, V]{} -+ } -+ -+ m.size = 0 -+} -+ -+func (m *Map[K, V]) rehash() { -+ oldData := m.data -+ m.data = make([]pair[K, V], 2*len(m.data)) -+ -+ // reset size -+ if m.hasZeroKey { -+ m.size = 1 -+ } else { -+ m.size = 0 -+ } -+ -+ forEach64(oldData, func(k K, v V) bool { -+ m.Put(k, v) -+ return true -+ }) -+} -+ -+// Len returns the number of elements in the map. -+// The length of a nil map is defined to be zero. 
-+func (m *Map[K, V]) Len() int { -+ if m == nil { -+ return 0 -+ } -+ -+ return m.size -+} -+ -+func (m *Map[K, V]) sizeThreshold() int { -+ return int(math.Floor(float64(len(m.data)) * fillFactor64)) -+} -+ -+func (m *Map[K, V]) startIndex(key K) int { -+ return phiMix64(int(key)) & (len(m.data) - 1) -+} -+ -+func (m *Map[K, V]) nextIndex(idx int) int { -+ return (idx + 1) & (len(m.data) - 1) -+} -+ -+func forEach64[K IntKey, V any](pairs []pair[K, V], f func(k K, v V) bool) { -+ for _, p := range pairs { -+ if p.K != K(0) && !f(p.K, p.V) { -+ return -+ } -+ } -+} -+ -+// Del deletes a key and its value, returning true iff the key was found -+func (m *Map[K, V]) Del(key K) bool { -+ if key == K(0) { -+ if m.hasZeroKey { -+ m.hasZeroKey = false -+ m.size-- -+ return true -+ } -+ return false -+ } -+ -+ idx := m.startIndex(key) -+ p := m.data[idx] -+ -+ if p.K == key { -+ // any keys that were pushed back needs to be shifted nack into the empty slot -+ // to avoid breaking the chain -+ m.shiftKeys(idx) -+ m.size-- -+ return true -+ } else if p.K == K(0) { // end of chain already -+ return false -+ } -+ -+ for { -+ idx = m.nextIndex(idx) -+ p = m.data[idx] -+ -+ if p.K == key { -+ // any keys that were pushed back needs to be shifted nack into the empty slot -+ // to avoid breaking the chain -+ m.shiftKeys(idx) -+ m.size-- -+ return true -+ } else if p.K == K(0) { -+ return false -+ } -+ -+ } -+} -+ -+func (m *Map[K, V]) shiftKeys(idx int) int { -+ // Shift entries with the same hash. -+ // We need to do this on deletion to ensure we don't have zeroes in the hash chain -+ for { -+ var p pair[K, V] -+ lastIdx := idx -+ idx = m.nextIndex(idx) -+ for { -+ p = m.data[idx] -+ if p.K == K(0) { -+ m.data[lastIdx] = pair[K, V]{} -+ return lastIdx -+ } -+ -+ slot := m.startIndex(p.K) -+ if lastIdx <= idx { -+ if lastIdx >= slot || slot > idx { -+ break -+ } -+ } else { -+ if lastIdx >= slot && slot > idx { -+ break -+ } -+ } -+ idx = m.nextIndex(idx) -+ } -+ m.data[lastIdx] = p -+ } -+} -+ -+func nextPowerOf2(x uint32) uint32 { -+ if x == math.MaxUint32 { -+ return x -+ } -+ -+ if x == 0 { -+ return 1 -+ } -+ -+ x-- -+ x |= x >> 1 -+ x |= x >> 2 -+ x |= x >> 4 -+ x |= x >> 8 -+ x |= x >> 16 -+ -+ return x + 1 -+} -+ -+func arraySize(exp int, fill float64) int { -+ s := nextPowerOf2(uint32(math.Ceil(float64(exp) / fill))) -+ if s < 2 { -+ s = 2 -+ } -+ return int(s) -+} -diff --git a/vendor/github.com/kamstrup/intmap/set.go b/vendor/github.com/kamstrup/intmap/set.go -new file mode 100644 -index 0000000000000..b81ce224b6036 ---- /dev/null -+++ b/vendor/github.com/kamstrup/intmap/set.go -@@ -0,0 +1,59 @@ -+package intmap -+ -+import ""iter"" -+ -+// Set is a specialization of Map modelling a set of integers. -+// Like Map, methods that read from the set are valid on the nil Set. -+// This include Has, Len, and ForEach. -+type Set[K IntKey] Map[K, struct{}] -+ -+// NewSet creates a new Set with a given initial capacity. -+func NewSet[K IntKey](capacity int) *Set[K] { -+ return (*Set[K])(New[K, struct{}](capacity)) -+} -+ -+// Add an element to the set. Returns true if the element was not already present. -+func (s *Set[K]) Add(k K) bool { -+ _, found := (*Map[K, struct{}])(s).PutIfNotExists(k, struct{}{}) -+ return found -+} -+ -+// Del deletes a key, returning true iff the key was found -+func (s *Set[K]) Del(k K) bool { -+ return (*Map[K, struct{}])(s).Del(k) -+} -+ -+// Clear removes all items from the Set, but keeps the internal buffers for reuse. 
-+func (s *Set[K]) Clear() { -+ (*Map[K, struct{}])(s).Clear() -+} -+ -+// Has returns true if the key is in the set. -+// If the set is nil this method always return false. -+func (s *Set[K]) Has(k K) bool { -+ return (*Map[K, struct{}])(s).Has(k) -+} -+ -+// Len returns the number of elements in the set. -+// If the set is nil this method return 0. -+func (s *Set[K]) Len() int { -+ return (*Map[K, struct{}])(s).Len() -+} -+ -+// ForEach iterates over the elements in the set while the visit function returns true. -+// This method returns immediately if the set is nil. -+// -+// The iteration order of a Set is not defined, so please avoid relying on it. -+func (s *Set[K]) ForEach(visit func(k K) bool) { -+ (*Map[K, struct{}])(s).ForEach(func(k K, _ struct{}) bool { -+ return visit(k) -+ }) -+} -+ -+// All returns an iterator over keys from the set. -+// The iterator returns immediately if the set is nil. -+// -+// The iteration order of a Set is not defined, so please avoid relying on it. -+func (s *Set[K]) All() iter.Seq[K] { -+ return s.ForEach -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 3168c6735b76c..7810d1ce504bf 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -462,8 +462,8 @@ github.com/aws/smithy-go/rand - github.com/aws/smithy-go/time - github.com/aws/smithy-go/transport/http - github.com/aws/smithy-go/transport/http/internal/io --# github.com/axiomhq/hyperloglog v0.2.0 --## explicit; go 1.21 -+# github.com/axiomhq/hyperloglog v0.2.1 -+## explicit; go 1.23 - github.com/axiomhq/hyperloglog - # github.com/baidubce/bce-sdk-go v0.9.205 - ## explicit; go 1.11 -@@ -1169,6 +1169,9 @@ github.com/json-iterator/go - # github.com/julienschmidt/httprouter v1.3.0 - ## explicit; go 1.7 - github.com/julienschmidt/httprouter -+# github.com/kamstrup/intmap v0.5.0 -+## explicit; go 1.23 -+github.com/kamstrup/intmap - # github.com/klauspost/compress v1.17.11 - ## explicit; go 1.21 - github.com/klauspost/compress",fix,"update module github.com/axiomhq/hyperloglog to v0.2.1 (#15322) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -5f9fe83becf2d802b29468a41e7ae08b8d29edac,2024-11-04 20:53:43,Grot (@grafanabot),"chore( operator): community release 0.7.0 (#14109) - -Co-authored-by: loki-gh-app[bot] <160051081+loki-gh-app[bot]@users.noreply.github.com>",False,"diff --git a/.release-please-manifest.json b/.release-please-manifest.json -index 96501106b6f6e..102cfe6531863 100644 ---- a/.release-please-manifest.json -+++ b/.release-please-manifest.json -@@ -1,4 +1,4 @@ - { - ""."": ""3.1.1"", -- ""operator"": ""0.6.2"" -+ ""operator"": ""0.7.0"" - } -diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md -index 7727779251bcc..17bc50ddc3194 100644 ---- a/operator/CHANGELOG.md -+++ b/operator/CHANGELOG.md -@@ -1,5 +1,42 @@ - ## Main - -+## [0.7.0](https://github.com/grafana/loki/compare/operator/v0.6.2...operator/v0.7.0) (2024-11-01) -+ -+ -+### ⚠ BREAKING CHANGES -+ -+* **operator:** Provide default OTLP attribute configuration ([#14410](https://github.com/grafana/loki/issues/14410)) -+* **operator:** Rename loki api go module ([#14568](https://github.com/grafana/loki/issues/14568)) -+* **operator:** Migrate project layout to kubebuilder go/v4 ([#14447](https://github.com/grafana/loki/issues/14447)) -+ -+### Features -+ -+* **operator:** Declare feature FIPS support for OpenShift only ([#14308](https://github.com/grafana/loki/issues/14308)) ([720c303](https://github.com/grafana/loki/commit/720c3037923c174e71a02d99d4bee6271428fbdb)) -+* 
**operator:** introduce 1x.pico size ([#14407](https://github.com/grafana/loki/issues/14407)) ([57de81d](https://github.com/grafana/loki/commit/57de81d8c27e221832790443cebaf141353c3e3f)) -+* **operator:** Provide default OTLP attribute configuration ([#14410](https://github.com/grafana/loki/issues/14410)) ([1b52387](https://github.com/grafana/loki/commit/1b5238721994c00764b6a7e7d63269c5b56d2480)) -+* **operator:** Update Loki operand to v3.2.1 ([#14526](https://github.com/grafana/loki/issues/14526)) ([5e970e5](https://github.com/grafana/loki/commit/5e970e50b166e73f5563e21c23db3ea99b24642e)) -+* **operator:** User-guide for OTLP configuration ([#14620](https://github.com/grafana/loki/issues/14620)) ([27b4071](https://github.com/grafana/loki/commit/27b40713540bd60918780cdd4cb645e6761427cb)) -+ -+ -+### Bug Fixes -+ -+* **deps:** update module github.com/prometheus/client_golang to v1.20.5 ([#14655](https://github.com/grafana/loki/issues/14655)) ([e12f843](https://github.com/grafana/loki/commit/e12f8436b4080db54c6d31c6af38416c6fdd7eb4)) -+* **operator:** add 1x.pico OpenShift UI dropdown menu ([#14660](https://github.com/grafana/loki/issues/14660)) ([4687f37](https://github.com/grafana/loki/commit/4687f377db0a7ae07ffdea354582c882c10b72c4)) -+* **operator:** Add missing groupBy label for all rules on OpenShift ([#14279](https://github.com/grafana/loki/issues/14279)) ([ce7b2e8](https://github.com/grafana/loki/commit/ce7b2e89d9470e4e6a61a94f2b51ff8b938b5a5e)) -+* **operator:** correctly ignore again BlotDB dashboards ([#14587](https://github.com/grafana/loki/issues/14587)) ([4879d10](https://github.com/grafana/loki/commit/4879d106bbeea29e331ddb7c9a49274600190032)) -+* **operator:** Disable automatic discovery of service name ([#14506](https://github.com/grafana/loki/issues/14506)) ([3834c74](https://github.com/grafana/loki/commit/3834c74966b307411732cd3cbaf66305008b10eb)) -+* **operator:** Disable log level discovery for OpenShift tenancy modes ([#14613](https://github.com/grafana/loki/issues/14613)) ([5034d34](https://github.com/grafana/loki/commit/5034d34ad23451954ea2459c341456da8d93d020)) -+* **operator:** Fix building the size-calculator image ([#14573](https://github.com/grafana/loki/issues/14573)) ([a79b8fe](https://github.com/grafana/loki/commit/a79b8fe7802964cbb96bde75a7502a8b1e8a23ab)) -+* **operator:** Fix make build target for size-calculator ([#14551](https://github.com/grafana/loki/issues/14551)) ([e727187](https://github.com/grafana/loki/commit/e727187ec3be2f10c80e984d00c40dad0308b036)) -+* **operator:** Move OTLP attribute for statefulset name to stream labels ([#14630](https://github.com/grafana/loki/issues/14630)) ([5df3594](https://github.com/grafana/loki/commit/5df3594f791d77031c53d7b0f5b01191de8a23f2)) -+* **operator:** Use empty initiliazed pod status map when no pods ([#14314](https://github.com/grafana/loki/issues/14314)) ([6f533ed](https://github.com/grafana/loki/commit/6f533ed4386ee2db61680a9021934bfe9a9ba749)) -+ -+ -+### Code Refactoring -+ -+* **operator:** Migrate project layout to kubebuilder go/v4 ([#14447](https://github.com/grafana/loki/issues/14447)) ([dbb3b6e](https://github.com/grafana/loki/commit/dbb3b6edc96f3545a946319c0324518800d286cf)) -+* **operator:** Rename loki api go module ([#14568](https://github.com/grafana/loki/issues/14568)) ([976d8ab](https://github.com/grafana/loki/commit/976d8ab81c1a79f35d7cec96f6a9c35a9947fa48)) -+ - ## [0.6.2](https://github.com/grafana/loki/compare/operator/v0.6.1...operator/v0.6.2) (2024-09-11)",chore,"community release 
0.7.0 (#14109) - -Co-authored-by: loki-gh-app[bot] <160051081+loki-gh-app[bot]@users.noreply.github.com>" -14a5e22c31f8cdd7b86ea3848cbc971f7a50a3c7,2020-11-23 13:07:53,Sandeep Sukhani,add missing ingester query routes in loki reads and operational dashboard (#2961),False,"diff --git a/production/loki-mixin/dashboards/dashboard-loki-operational.json b/production/loki-mixin/dashboards/dashboard-loki-operational.json -index 401d51c1f1c05..f0e75caf63c0a 100644 ---- a/production/loki-mixin/dashboards/dashboard-loki-operational.json -+++ b/production/loki-mixin/dashboards/dashboard-loki-operational.json -@@ -1534,17 +1534,17 @@ - ""steppedLine"": false, - ""targets"": [ - { -- ""expr"": ""histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\"", cluster=\""$cluster\""})) * 1e3"", -+ ""expr"": ""histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"", cluster=\""$cluster\""})) * 1e3"", - ""legendFormat"": "".99-{{route}}"", - ""refId"": ""A"" - }, - { -- ""expr"": ""histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\"", cluster=\""$cluster\""})) * 1e3"", -+ ""expr"": ""histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"", cluster=\""$cluster\""})) * 1e3"", - ""legendFormat"": "".9-{{route}}"", - ""refId"": ""B"" - }, - { -- ""expr"": ""histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\"", cluster=\""$cluster\""})) * 1e3"", -+ ""expr"": ""histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"", cluster=\""$cluster\""})) * 1e3"", - ""legendFormat"": "".5-{{route}}"", - ""refId"": ""C"" - } -@@ -1639,7 +1639,7 @@ - ""steppedLine"": false, - ""targets"": [ - { -- ""expr"": ""sum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", job=\""$namespace/ingester\"", status_code!~\""5[0-9]{2}\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\""}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\""}[5m])) by (route)"", -+ ""expr"": ""sum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", job=\""$namespace/ingester\"", status_code!~\""5[0-9]{2}\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\""}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", 
job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\""}[5m])) by (route)"", - ""interval"": """", - ""legendFormat"": ""{{route}}"", - ""refId"": ""A"" -diff --git a/production/loki-mixin/dashboards/loki-reads.libsonnet b/production/loki-mixin/dashboards/loki-reads.libsonnet -index 1c5d45a826344..883c46e04b112 100644 ---- a/production/loki-mixin/dashboards/loki-reads.libsonnet -+++ b/production/loki-mixin/dashboards/loki-reads.libsonnet -@@ -6,7 +6,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; - local dashboards = self, - - local http_routes = 'loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values', -- local grpc_routes = '/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series', -+ local grpc_routes = '/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs', - - 'loki-reads.json': { - local cfg = self,",unknown,add missing ingester query routes in loki reads and operational dashboard (#2961) -1f6f4337cee64f1ef212bcc46cd6c7f429762ae7,2019-10-15 18:03:26,Cyril Tovena,"Add logql filter to match stages and drop capability (#1112) - -* Add logql filter to match stages and drop capability - -* use const string instead and remove unused value - -* Uses action property instead of drop_entries",False,"diff --git a/docs/clients/promtail/pipelines.md b/docs/clients/promtail/pipelines.md -index b72a5a0fee2f0..8e05c056dd4af 100644 ---- a/docs/clients/promtail/pipelines.md -+++ b/docs/clients/promtail/pipelines.md -@@ -18,7 +18,7 @@ stages: - 2. Change the timestamp of the log line - 3. Change the content of the log line - 4. Create a metric based on the extracted data --4. **Filtering stages** optionally apply a subset of stages based on some -+4. **Filtering stages** optionally apply a subset of stages or drop entries based on some - condition. - - Typical pipelines will start with a parsing stage (such as a -@@ -28,7 +28,7 @@ something with that extracted data. The most common action stage will be a - [labels](./stages/labels.md) stage to turn extracted data into a label. - - A common stage will also be the [match](./stages/match.md) stage to selectively --apply stages based on the current labels. -+apply stages or drop entries based on a [LogQL stream selector and filter expressions](../../logql.md). - - Note that pipelines can not currently be used to deduplicate logs; Loki will - receive the same log line multiple times if, for example: -@@ -76,9 +76,9 @@ scrape_configs: - source: timestamp - - # This stage is only going to run if the scraped target has a label of -- # ""name"" with a value of ""nginx"". -+ # ""name"" with a value of ""nginx"" and if the log line contains the word ""GET"" - - match: -- selector: '{name=""nginx""}' -+ selector: '{name=""nginx""} |= ""GET""' - stages: - # This regex stage extracts a new output by matching against some - # values and capturing the rest. -@@ -126,10 +126,10 @@ scrape_configs: - level: - component: - -- # This stage will only run if the scraped target has a label of ""app"" -- # and a value of ""some-app"". 
-+ # This stage will only run if the scraped target has a label ""app"" -+ # with a value of ""some-app"" and the log line doesn't contains the word ""info"" - - match: -- selector: '{app=""some-app""}' -+ selector: '{app=""some-app""} != ""info""' - stages: - # The regex stage tries to extract a Go panic by looking for panic: - # in the log message. -@@ -215,4 +215,3 @@ Action stages: - Filtering stages: - - * [match](./stages/match.md): Conditionally run stages based on the label set. -- -diff --git a/docs/clients/promtail/stages/match.md b/docs/clients/promtail/stages/match.md -index 5a931b01c2946..9744fb70806d3 100644 ---- a/docs/clients/promtail/stages/match.md -+++ b/docs/clients/promtail/stages/match.md -@@ -1,20 +1,24 @@ - # `match` stage - - The match stage is a filtering stage that conditionally applies a set of stages --when a log entry matches a configurable [LogQL](../../../logql.md) stream --selector. -+or drop entries when a log entry matches a configurable [LogQL](../../../logql.md) -+stream selector and filter expressions. - - ## Schema - - ```yaml - match: -- # LogQL stream selector. -+ # LogQL stream selector and filter expressions. - selector: - - # Names the pipeline. When defined, creates an additional label in - # the pipeline_duration_seconds histogram, where the value is - # concatenated with job_name using an underscore. -- [pipieline_name: ] -+ [pipeline_name: ] -+ -+ # When set to drop (default to keep), all entries matching the selector will -+ # be dropped. Stages must not be defined when dropping entries. -+ [action: ] - - # Nested set of pipeline stages only if the selector - # matches the labels of the log entries: -@@ -46,33 +50,39 @@ pipeline_stages: - - labels: - app: - - match: -- selector: ""{app=\""loki\""}"" -+ selector: '{app=""loki""}' - stages: - - json: - expressions: - msg: message - - match: - pipeline_name: ""app2"" -- selector: ""{app=\""pokey\""}"" -+ selector: '{app=""pokey""}' -+ action: keep - stages: - - json: - expressions: - msg: msg -+- match: -+ selector: '{app=""promtail""} |~ "".*noisy error.*""' -+ action: drop - - output: - source: msg - ``` - --And the given log line: -+And given log lines: - --``` -+```json - { ""time"":""2012-11-01T22:08:41+00:00"", ""app"":""loki"", ""component"": [""parser"",""type""], ""level"" : ""WARN"", ""message"" : ""app1 log line"" } -+{ ""time"":""2012-11-01T22:08:41+00:00"", ""app"":""promtail"", ""component"": [""parser"",""type""], ""level"" : ""ERROR"", ""message"" : ""foo noisy error"" } - ``` - --The first stage will add `app` with a value of `loki` into the extracted map, -+The first stage will add `app` with a value of `loki` into the extracted map for the first log line, - while the second stage will add `app` as a label (again with the value of `loki`). -+The second line will follow the same flow and will be added the label `app` with a value of `promtail`. - - The third stage uses LogQL to only execute the nested stages when there is a --label of `app` whose value is `loki`. This matches in our case; the nested -+label of `app` whose value is `loki`. This matches the first line in our case; the nested - `json` stage then adds `msg` into the extracted map with a value of `app1 log - line`. - -@@ -80,6 +90,9 @@ The fourth stage uses LogQL to only executed the nested stages when there is a - label of `app` whose value is `pokey`. This does **not** match in our case, so - the nested `json` stage is not ran. 
- -+The fifth stage will drop any entries from the application `promtail` that matches -+the regex `.*noisy error`. -+ - The final `output` stage changes the contents of the log line to be the value of - `msg` from the extracted map. In this case, the log line is changed to `app1 log - line`. -diff --git a/pkg/logentry/stages/match.go b/pkg/logentry/stages/match.go -index d59c6d704b83a..0c0838afd0b76 100644 ---- a/pkg/logentry/stages/match.go -+++ b/pkg/logentry/stages/match.go -@@ -3,12 +3,13 @@ package stages - import ( - ""time"" - -+ ""github.com/prometheus/prometheus/pkg/labels"" -+ - ""github.com/go-kit/kit/log"" - ""github.com/mitchellh/mapstructure"" - ""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/common/model"" -- ""github.com/prometheus/prometheus/pkg/labels"" - - ""github.com/grafana/loki/pkg/logql"" - ) -@@ -19,6 +20,10 @@ const ( - ErrSelectorRequired = ""selector statement required for match stage"" - ErrMatchRequiresStages = ""match stage requires at least one additional stage to be defined in '- stages'"" - ErrSelectorSyntax = ""invalid selector syntax for match stage"" -+ ErrStagesWithDropLine = ""match stage configured to drop entries cannot contains stages"" -+ ErrUnknownMatchAction = ""match stage action should be 'keep' or 'drop'"" -+ MatchActionKeep = ""keep"" -+ MatchActionDrop = ""drop"" - ) - - // MatcherConfig contains the configuration for a matcherStage -@@ -26,10 +31,11 @@ type MatcherConfig struct { - PipelineName *string `mapstructure:""pipeline_name""` - Selector string `mapstructure:""selector""` - Stages PipelineStages `mapstructure:""stages""` -+ Action string `mapstructure:""action""` - } - - // validateMatcherConfig validates the MatcherConfig for the matcherStage --func validateMatcherConfig(cfg *MatcherConfig) ([]*labels.Matcher, error) { -+func validateMatcherConfig(cfg *MatcherConfig) (logql.LogSelectorExpr, error) { - if cfg == nil { - return nil, errors.New(ErrEmptyMatchStageConfig) - } -@@ -39,14 +45,26 @@ func validateMatcherConfig(cfg *MatcherConfig) ([]*labels.Matcher, error) { - if cfg.Selector == """" { - return nil, errors.New(ErrSelectorRequired) - } -- if cfg.Stages == nil || len(cfg.Stages) == 0 { -+ switch cfg.Action { -+ case MatchActionKeep, MatchActionDrop: -+ case """": -+ cfg.Action = MatchActionKeep -+ default: -+ return nil, errors.New(ErrUnknownMatchAction) -+ } -+ -+ if cfg.Action == MatchActionKeep && (cfg.Stages == nil || len(cfg.Stages) == 0) { - return nil, errors.New(ErrMatchRequiresStages) - } -- matchers, err := logql.ParseMatchers(cfg.Selector) -+ if cfg.Action == MatchActionDrop && (cfg.Stages != nil && len(cfg.Stages) != 0) { -+ return nil, errors.New(ErrStagesWithDropLine) -+ } -+ -+ selector, err := logql.ParseLogSelector(cfg.Selector) - if err != nil { - return nil, errors.Wrap(err, ErrSelectorSyntax) - } -- return matchers, nil -+ return selector, nil - } - - // newMatcherStage creates a new matcherStage from config -@@ -56,7 +74,7 @@ func newMatcherStage(logger log.Logger, jobName *string, config interface{}, reg - if err != nil { - return nil, err - } -- matchers, err := validateMatcherConfig(cfg) -+ selector, err := validateMatcherConfig(cfg) - if err != nil { - return nil, err - } -@@ -67,21 +85,34 @@ func newMatcherStage(logger log.Logger, jobName *string, config interface{}, reg - nPtr = &name - } - -- pl, err := NewPipeline(logger, cfg.Stages, nPtr, registerer) -+ var pl *Pipeline -+ if cfg.Action == MatchActionKeep { -+ var err error -+ pl, err = 
NewPipeline(logger, cfg.Stages, nPtr, registerer) -+ if err != nil { -+ return nil, errors.Wrapf(err, ""match stage failed to create pipeline from config: %v"", config) -+ } -+ } -+ -+ filter, err := selector.Filter() - if err != nil { -- return nil, errors.Wrapf(err, ""match stage failed to create pipeline from config: %v"", config) -+ return nil, errors.Wrap(err, ""error parsing filter"") - } - - return &matcherStage{ -- matchers: matchers, -+ matchers: selector.Matchers(), - pipeline: pl, -+ action: cfg.Action, -+ filter: filter, - }, nil - } - - // matcherStage applies Label matchers to determine if the include stages should be run - type matcherStage struct { - matchers []*labels.Matcher -+ filter logql.Filter - pipeline Stage -+ action string - } - - // Process implements Stage -@@ -91,7 +122,15 @@ func (m *matcherStage) Process(labels model.LabelSet, extracted map[string]inter - return - } - } -- m.pipeline.Process(labels, extracted, t, entry) -+ if m.filter == nil || m.filter([]byte(*entry)) { -+ switch m.action { -+ case MatchActionDrop: -+ // Adds the drop label to not be sent by the api.EntryHandler -+ labels[dropLabel] = """" -+ case MatchActionKeep: -+ m.pipeline.Process(labels, extracted, t, entry) -+ } -+ } - } - - // Name implements Stage -diff --git a/pkg/logentry/stages/match_test.go b/pkg/logentry/stages/match_test.go -index 9849d064309fa..843e2b8dd24a3 100644 ---- a/pkg/logentry/stages/match_test.go -+++ b/pkg/logentry/stages/match_test.go -@@ -99,43 +99,70 @@ func TestMatchPipeline(t *testing.T) { - func TestMatcher(t *testing.T) { - t.Parallel() - tests := []struct { -- matcher string -- labels map[string]string -+ selector string -+ labels map[string]string -+ action string - -- shouldRun bool -- wantErr bool -+ shouldDrop bool -+ shouldRun bool -+ wantErr bool - }{ -- {""{foo=\""bar\""} |= \""foo\"""", map[string]string{""foo"": ""bar""}, false, true}, -- {""{foo=\""bar\""} |~ \""foo\"""", map[string]string{""foo"": ""bar""}, false, true}, -- {""foo"", map[string]string{""foo"": ""bar""}, false, true}, -- {""{}"", map[string]string{""foo"": ""bar""}, false, true}, -- {""{"", map[string]string{""foo"": ""bar""}, false, true}, -- {"""", map[string]string{""foo"": ""bar""}, true, true}, -- {""{foo=\""bar\""}"", map[string]string{""foo"": ""bar""}, true, false}, -- {""{foo=\""\""}"", map[string]string{""foo"": ""bar""}, false, false}, -- {""{foo=\""\""}"", map[string]string{}, true, false}, -- {""{foo!=\""bar\""}"", map[string]string{""foo"": ""bar""}, false, false}, -- {""{foo=\""bar\"",bar!=\""test\""}"", map[string]string{""foo"": ""bar""}, true, false}, -- {""{foo=\""bar\"",bar!=\""test\""}"", map[string]string{""foo"": ""bar"", ""bar"": ""test""}, false, false}, -- {""{foo=\""bar\"",bar=~\""te.*\""}"", map[string]string{""foo"": ""bar"", ""bar"": ""test""}, true, false}, -- {""{foo=\""bar\"",bar!~\""te.*\""}"", map[string]string{""foo"": ""bar"", ""bar"": ""test""}, false, false}, -- {""{foo=\""\""}"", map[string]string{}, true, false}, -+ {`{foo=""bar""} |= ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false}, -+ {`{foo=""bar""} |~ ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false}, -+ {`{foo=""bar""} |= ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false}, -+ {`{foo=""bar""} |~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false}, -+ {`{foo=""bar""} != ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false}, 
-+ {`{foo=""bar""} !~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false}, -+ {`{foo=""bar""} != ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false}, -+ {`{foo=""bar""} |= ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false}, -+ {`{foo=""bar""} |~ ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false}, -+ {`{foo=""bar""} |= ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false}, -+ {`{foo=""bar""} |~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false}, -+ {`{foo=""bar""} != ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false}, -+ {`{foo=""bar""} !~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false}, -+ {`{foo=""bar""} != ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false}, -+ {`{foo=""bar""} !~ ""[]""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, true}, -+ {""foo"", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, true}, -+ {""{}"", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, true}, -+ {""{"", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, true}, -+ {"""", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, true}, -+ {`{foo=""bar""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false}, -+ {`{foo=""""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false}, -+ {`{foo=""""}`, map[string]string{}, MatchActionKeep, false, true, false}, -+ {`{foo!=""bar""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false}, -+ {`{foo!=""bar""}`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false}, -+ {`{foo=""bar"",bar!=""test""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false}, -+ {`{foo=""bar"",bar!=""test""}`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false}, -+ {`{foo=""bar"",bar!=""test""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionKeep, false, false, false}, -+ {`{foo=""bar"",bar=~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionDrop, true, false, false}, -+ {`{foo=""bar"",bar=~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionKeep, false, true, false}, -+ {`{foo=""bar"",bar!~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionKeep, false, false, false}, -+ {`{foo=""bar"",bar!~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionDrop, false, false, false}, -+ -+ {`{foo=""""}`, map[string]string{}, MatchActionKeep, false, true, false}, - } - - for _, tt := range tests { -- t.Run(fmt.Sprintf(""%s/%s"", tt.matcher, tt.labels), func(t *testing.T) { -+ name := fmt.Sprintf(""%s/%s/%s"", tt.selector, tt.labels, tt.action) -+ -+ t.Run(name, func(t *testing.T) { - // Build a match config which has a simple label stage that when matched will add the test_label to - // the labels in the pipeline. 
-- matchConfig := MatcherConfig{ -- nil, -- tt.matcher, -- PipelineStages{ -+ var stages PipelineStages -+ if tt.action != MatchActionDrop { -+ stages = PipelineStages{ - PipelineStage{ - StageTypeLabel: LabelsConfig{ - ""test_label"": nil, - }, - }, -- }, -+ } -+ } -+ matchConfig := MatcherConfig{ -+ nil, -+ tt.selector, -+ stages, -+ tt.action, - } - s, err := newMatcherStage(util.Logger, nil, matchConfig, prometheus.DefaultRegisterer) - if (err != nil) != tt.wantErr { -@@ -143,7 +170,7 @@ func TestMatcher(t *testing.T) { - return - } - if s != nil { -- ts, entry := time.Now(), """" -+ ts, entry := time.Now(), ""foo"" - extracted := map[string]interface{}{ - ""test_label"": ""unimportant value"", - } -@@ -156,6 +183,41 @@ func TestMatcher(t *testing.T) { - t.Error(""stage ran but should have not"") - } - } -+ if tt.shouldDrop { -+ if _, ok := labels[dropLabel]; !ok { -+ t.Error(""stage should have been dropped"") -+ } -+ } -+ } -+ }) -+ } -+} -+ -+func Test_validateMatcherConfig(t *testing.T) { -+ empty := """" -+ notempty := ""test"" -+ tests := []struct { -+ name string -+ cfg *MatcherConfig -+ wantErr bool -+ }{ -+ {""empty"", nil, true}, -+ {""pipeline name required"", &MatcherConfig{PipelineName: &empty}, true}, -+ {""selector required"", &MatcherConfig{PipelineName: ¬empty, Selector: """"}, true}, -+ {""nil stages without dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionKeep, Stages: nil}, true}, -+ {""empty stages without dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionKeep, Stages: []interface{}{}}, true}, -+ {""stages with dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionDrop, Stages: []interface{}{""""}}, true}, -+ {""empty stages dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionDrop, Stages: []interface{}{}}, false}, -+ {""stages without dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionKeep, Stages: []interface{}{""""}}, false}, -+ {""bad selector"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo}`, Action: MatchActionKeep, Stages: []interface{}{""""}}, true}, -+ {""bad action"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo}`, Action: ""nope"", Stages: []interface{}{""""}}, true}, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ _, err := validateMatcherConfig(tt.cfg) -+ if (err != nil) != tt.wantErr { -+ t.Errorf(""validateMatcherConfig() error = %v, wantErr %v"", err, tt.wantErr) -+ return - } - }) - } -diff --git a/pkg/logentry/stages/pipeline.go b/pkg/logentry/stages/pipeline.go -index 384e9825cb701..2974850bc5cee 100644 ---- a/pkg/logentry/stages/pipeline.go -+++ b/pkg/logentry/stages/pipeline.go -@@ -12,6 +12,8 @@ import ( - ""github.com/grafana/loki/pkg/promtail/api"" - ) - -+const dropLabel = ""__drop__"" -+ - // PipelineStages contains configuration for each stage within a pipeline - type PipelineStages = []interface{} - -@@ -109,6 +111,10 @@ func (p *Pipeline) Wrap(next api.EntryHandler) api.EntryHandler { - return api.EntryHandlerFunc(func(labels model.LabelSet, timestamp time.Time, line string) error { - extracted := map[string]interface{}{} - p.Process(labels, extracted, ×tamp, &line) -+ // if the labels set contains the __drop__ label we don't send this entry to the next EntryHandler -+ if _, ok := labels[dropLabel]; ok { -+ return nil -+ } - return next.Handle(labels, timestamp, 
line) - }) - } -diff --git a/pkg/logentry/stages/pipeline_test.go b/pkg/logentry/stages/pipeline_test.go -index 7b5dd0f89da82..fc8023b38cb68 100644 ---- a/pkg/logentry/stages/pipeline_test.go -+++ b/pkg/logentry/stages/pipeline_test.go -@@ -11,7 +11,6 @@ import ( - ""github.com/prometheus/common/model"" - ""github.com/stretchr/testify/assert"" - ""github.com/stretchr/testify/require"" -- - ""gopkg.in/yaml.v2"" - ) - -@@ -190,3 +189,64 @@ func BenchmarkPipeline(b *testing.B) { - }) - } - } -+ -+type stubHandler struct { -+ bool -+} -+ -+func (s *stubHandler) Handle(labels model.LabelSet, time time.Time, entry string) error { -+ s.bool = true -+ return nil -+} -+ -+func TestPipeline_Wrap(t *testing.T) { -+ now := time.Now() -+ var config map[string]interface{} -+ err := yaml.Unmarshal([]byte(testYaml), &config) -+ if err != nil { -+ panic(err) -+ } -+ p, err := NewPipeline(util.Logger, config[""pipeline_stages""].([]interface{}), nil, prometheus.DefaultRegisterer) -+ if err != nil { -+ panic(err) -+ } -+ -+ tests := map[string]struct { -+ labels model.LabelSet -+ shouldSend bool -+ }{ -+ ""should drop"": { -+ map[model.LabelName]model.LabelValue{ -+ dropLabel: ""true"", -+ ""stream"": ""stderr"", -+ ""action"": ""GET"", -+ ""status_code"": ""200"", -+ }, -+ false, -+ }, -+ ""should send"": { -+ map[model.LabelName]model.LabelValue{ -+ ""stream"": ""stderr"", -+ ""action"": ""GET"", -+ ""status_code"": ""200"", -+ }, -+ true, -+ }, -+ } -+ -+ for tName, tt := range tests { -+ tt := tt -+ t.Run(tName, func(t *testing.T) { -+ t.Parallel() -+ extracted := map[string]interface{}{} -+ p.Process(tt.labels, extracted, &now, &rawTestLine) -+ stub := &stubHandler{} -+ handler := p.Wrap(stub) -+ if err := handler.Handle(tt.labels, now, rawTestLine); err != nil { -+ t.Fatalf(""failed to handle entry: %v"", err) -+ } -+ assert.Equal(t, stub.bool, tt.shouldSend) -+ -+ }) -+ } -+}",unknown,"Add logql filter to match stages and drop capability (#1112) - -* Add logql filter to match stages and drop capability - -* use const string instead and remove unused value - -* Uses action property instead of drop_entries" -01a4de1bfbff17dccfc87e226d715cb86873e930,2024-03-25 21:39:52,J Stickler,docs: [style] quickstart as one word (#12301),False,"diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md -index 36daa54cff0bc..5860ec1cc4fa5 100644 ---- a/docs/sources/get-started/_index.md -+++ b/docs/sources/get-started/_index.md -@@ -32,7 +32,6 @@ To collect logs and view your log data generally involves the following steps: - - **Next steps:** Learn more about Loki’s query language, [LogQL](https://grafana.com/docs/loki/latest/query/). - -- - ## Example Grafana Agent configuration file to ship Kubernetes Pod logs to Loki - - To deploy Grafana Agent to collect Pod logs from your Kubernetes cluster and ship them to Loki, you an use the Grafana Agent Helm chart, and a `values.yaml` file. -@@ -40,7 +39,6 @@ To deploy Grafana Agent to collect Pod logs from your Kubernetes cluster and shi - 1. Install Loki with the [Helm chart](https://grafana.com/docs/loki/latest/setup/install/helm/install-scalable/). - 1. Deploy the Grafana Agent, using the [Grafana Agent Helm chart](https://grafana.com/docs/agent/latest/flow/setup/install/kubernetes/) and this example `values.yaml` file updating the value for `forward_to = [loki.write.endpoint.receiver]`: - -- - ```yaml - agent: - mounts: -@@ -101,14 +99,15 @@ agent: - } - - ``` -- - - 1. 
Then install Grafana Agent in your Kubernetes cluster using: - - ```bash - helm upgrade -f values.yaml agent grafana/grafana-agent - ``` -+ - This sample file is configured to: -+ - - Install Grafana Agent to discover Pod logs. - - Add `container` and `pod` labels to the logs. - - Push the logs to your Loki cluster using the tenant ID `cloud`. -diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md -index 70cbfc2c57d21..b4213e233546d 100644 ---- a/docs/sources/get-started/quick-start.md -+++ b/docs/sources/get-started/quick-start.md -@@ -1,11 +1,11 @@ - --- --title: Quick start to run Loki locally --menuTitle: Loki quick start -+title: Quickstart to run Loki locally -+menuTitle: Loki quickstart - weight: 550 - description: How to create and use a simple local Loki cluster for testing and evaluation purposes. - --- - --# Quick start to run Loki locally -+# Quickstart to run Loki locally - - If you want to experiment with Loki, you can run Loki locally using the Docker Compose file that ships with Loki. It runs Loki in a [monolithic deployment](https://grafana.com/docs/loki/latest/get-started/deployment-modes/#monolithic-mode) mode and includes a sample application to generate logs. - -@@ -24,11 +24,12 @@ The Docker Compose configuration instantiates the following components, each in - ## Installing Loki and collecting sample logs - - Prerequisites -+ - - [Docker](https://docs.docker.com/install) - - [Docker Compose](https://docs.docker.com/compose/install) - - {{% admonition type=""note"" %}} --This quick start assumes you are running Linux. -+This quickstart assumes you are running Linux. - {{% /admonition %}} - - **To install Loki locally, follow these steps:** -@@ -57,6 +58,7 @@ This quick start assumes you are running Linux. - ``` - - You should see something similar to the following: -+ - ```bash - ✔ Network evaluate-loki_loki Created 0.1s - ✔ Container evaluate-loki-minio-1 Started 0.6s -@@ -99,30 +101,37 @@ Once you have collected logs, you will want to view them. You can view your log - Here are some basic sample queries to get you started using LogQL. Note that these queries assume that you followed the instructions to create a directory called `evaluate-loki`. If you installed in a different directory, you’ll need to modify these queries to match your installation directory. After copying any of these queries into the query editor, click **Run Query** (4) to execute the query. - - 1. View all the log lines which have the container label ""flog"": -+ - ```bash - {container=""evaluate-loki-flog-1""} - ``` -+ - In Loki, this is called a log stream. Loki uses [labels](https://grafana.com/docs/loki/latest/get-started/labels/) as metadata to describe log streams. Loki queries always start with a label selector. In the query above, the label selector is `container`. - - 1. To view all the log lines which have the container label ""grafana"": -+ - ```bash - {container=""evaluate-loki-grafana-1""} - ``` - - 1. Find all the log lines in the container=flog stream that contain the string ""status"": -+ - ```bash - {container=""evaluate-loki-flog-1""} |= `status` - ``` - - 1. Find all the log lines in the container=flog stream where the JSON field ""status"" is ""404"": -+ - ```bash - {container=""evaluate-loki-flog-1""} | json | status=`404` - ``` - - 1. 
Calculate the number of logs per second where the JSON field ""status"" is ""404"": -+ - ```bash - sum by(container) (rate({container=""evaluate-loki-flog-1""} | json | status=`404` [$__auto])) - ``` -+ - The final query above is a metric query which returns a time series. This will trigger Grafana to draw a graph of the results. You can change the type of graph for a different view of the data. Click **Bars** to view a bar graph of the data. - - 1. Click the **Builder** tab (3) to return to Builder mode in the query editor. -@@ -134,30 +143,37 @@ Once you have collected logs, you will want to view them. You can view your log - For a thorough introduction to LogQL, refer to the [LogQL reference](https://grafana.com/docs/loki/latest/query/). - - ## Sample queries (code view) -+ - Here are some more sample queries that you can run using the Flog sample data. - - To see all the log lines that flog has generated, enter the LogQL query: -+ - ```bash - {container=""evaluate-loki-flog-1""}|= `` - ``` --The flog app generates log lines for simulated HTTP requests. -+ -+The flog app generates log lines for simulated HTTP requests. - - To see all `GET` log lines, enter the LogQL query: -+ - ```bash - {container=""evaluate-loki-flog-1""} |= ""GET"" - ``` - - To see all `POST` methods, enter the LogQL query: -+ - ```bash - {container=""evaluate-loki-flog-1""} |= ""POST"" - ``` - - To see every log line with a 401 status (unauthorized error), enter the LogQL query: -+ - ```bash - {container=""evaluate-loki-flog-1""} | json | status=""401"" - ``` - - To see every log line that does not contain the value 401: -+ - ```bash - {container=""evaluate-loki-flog-1""} != ""401"" - ```",docs,[style] quickstart as one word (#12301) -0ab1b28812ec44a9ece076c5144992f2bc69a8a6,2020-04-30 02:20:40,Ed Welch,"Loki: Improve logging and add metrics to streams dropped by stream limit (#2012) - -* Improve the log message when we drop streams because a user is hitting a stream limit. -Increment the dropped samples metrics when this happens also. 
- -Signed-off-by: Ed Welch - -* improving comments - -Signed-off-by: Ed Welch ",False,"diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -index 7d05eb97608c2..afcbc5d72526f 100644 ---- a/pkg/ingester/instance.go -+++ b/pkg/ingester/instance.go -@@ -2,6 +2,7 @@ package ingester - - import ( - ""context"" -+ ""github.com/grafana/loki/pkg/util/validation"" - ""net/http"" - ""sync"" - ""time"" -@@ -129,13 +130,8 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { - - var appendErr error - for _, s := range req.Streams { -- labels, err := util.ToClientLabels(s.Labels) -- if err != nil { -- appendErr = err -- continue -- } - -- stream, err := i.getOrCreateStream(labels) -+ stream, err := i.getOrCreateStream(s) - if err != nil { - appendErr = err - continue -@@ -153,7 +149,11 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { - return appendErr - } - --func (i *instance) getOrCreateStream(labels []client.LabelAdapter) (*stream, error) { -+func (i *instance) getOrCreateStream(pushReqStream *logproto.Stream) (*stream, error) { -+ labels, err := util.ToClientLabels(pushReqStream.Labels) -+ if err != nil { -+ return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) -+ } - rawFp := client.FastFingerprint(labels) - fp := i.mapper.mapFP(rawFp, labels) - -@@ -162,8 +162,14 @@ func (i *instance) getOrCreateStream(labels []client.LabelAdapter) (*stream, err - return stream, nil - } - -- err := i.limiter.AssertMaxStreamsPerUser(i.instanceID, len(i.streams)) -+ err = i.limiter.AssertMaxStreamsPerUser(i.instanceID, len(i.streams)) - if err != nil { -+ validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(len(pushReqStream.Entries))) -+ bytes := 0 -+ for _, e := range pushReqStream.Entries { -+ bytes += len(e.Line) -+ } -+ validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(bytes)) - return nil, httpgrpc.Errorf(http.StatusTooManyRequests, err.Error()) - } - -diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go -index 50bfed5473882..c425672e207f2 100644 ---- a/pkg/ingester/instance_test.go -+++ b/pkg/ingester/instance_test.go -@@ -10,8 +10,6 @@ import ( - - ""github.com/prometheus/prometheus/pkg/labels"" - -- ""github.com/grafana/loki/pkg/util"" -- - ""github.com/grafana/loki/pkg/chunkenc"" - ""github.com/grafana/loki/pkg/logproto"" - -@@ -124,15 +122,12 @@ func TestSyncPeriod(t *testing.T) { - result = append(result, logproto.Entry{Timestamp: tt, Line: fmt.Sprintf(""hello %d"", i)}) - tt = tt.Add(time.Duration(1 + rand.Int63n(randomStep.Nanoseconds()))) - } -- -- err = inst.Push(context.Background(), &logproto.PushRequest{Streams: []*logproto.Stream{{Labels: lbls, Entries: result}}}) -- require.NoError(t, err) -- -- // let's verify results. 
-- ls, err := util.ToClientLabels(lbls) -+ pr := &logproto.PushRequest{Streams: []*logproto.Stream{{Labels: lbls, Entries: result}}} -+ err = inst.Push(context.Background(), pr) - require.NoError(t, err) - -- s, err := inst.getOrCreateStream(ls) -+ // let's verify results -+ s, err := inst.getOrCreateStream(pr.Streams[0]) - require.NoError(t, err) - - // make sure each chunk spans max 'sync period' time -diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go -index 5f1b52002754e..382c1c0be70bb 100644 ---- a/pkg/ingester/limiter.go -+++ b/pkg/ingester/limiter.go -@@ -8,7 +8,7 @@ import ( - ) - - const ( -- errMaxStreamsPerUserLimitExceeded = ""per-user streams limit (local: %d global: %d actual local: %d) exceeded"" -+ errMaxStreamsPerUserLimitExceeded = ""tenant '%v' per-user streams limit exceeded, streams: %d exceeds calculated limit: %d (local limit: %d, global limit: %d, global/ingesters: %d)"" - ) - - // RingCount is the interface exposed by a ring implementation which allows -@@ -37,32 +37,28 @@ func NewLimiter(limits *validation.Overrides, ring RingCount, replicationFactor - // AssertMaxStreamsPerUser ensures limit has not been reached compared to the current - // number of streams in input and returns an error if so. - func (l *Limiter) AssertMaxStreamsPerUser(userID string, streams int) error { -- actualLimit := l.maxStreamsPerUser(userID) -- if streams < actualLimit { -- return nil -- } -- -- localLimit := l.limits.MaxLocalStreamsPerUser(userID) -- globalLimit := l.limits.MaxGlobalStreamsPerUser(userID) -- -- return fmt.Errorf(errMaxStreamsPerUserLimitExceeded, localLimit, globalLimit, actualLimit) --} -- --func (l *Limiter) maxStreamsPerUser(userID string) int { -+ // Start by setting the local limit either from override or default - localLimit := l.limits.MaxLocalStreamsPerUser(userID) - - // We can assume that streams are evenly distributed across ingesters - // so we do convert the global limit into a local limit - globalLimit := l.limits.MaxGlobalStreamsPerUser(userID) -- localLimit = l.minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit)) -+ adjustedGlobalLimit := l.convertGlobalToLocalLimit(globalLimit) -+ -+ // Set the calculated limit to the lesser of the local limit or the new calculated global limit -+ calculatedLimit := l.minNonZero(localLimit, adjustedGlobalLimit) - - // If both the local and global limits are disabled, we just - // use the largest int value -- if localLimit == 0 { -- localLimit = math.MaxInt32 -+ if calculatedLimit == 0 { -+ calculatedLimit = math.MaxInt32 -+ } -+ -+ if streams < calculatedLimit { -+ return nil - } - -- return localLimit -+ return fmt.Errorf(errMaxStreamsPerUserLimitExceeded, userID, streams, calculatedLimit, localLimit, globalLimit, adjustedGlobalLimit) - } - - func (l *Limiter) convertGlobalToLocalLimit(globalLimit int) int { -diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go -index c01a06862824d..e43e65d74b205 100644 ---- a/pkg/ingester/limiter_test.go -+++ b/pkg/ingester/limiter_test.go -@@ -11,112 +11,86 @@ import ( - ""github.com/grafana/loki/pkg/util/validation"" - ) - --func TestLimiter_maxStreamsPerUser(t *testing.T) { -+func TestLimiter_AssertMaxStreamsPerUser(t *testing.T) { - tests := map[string]struct { - maxLocalStreamsPerUser int - maxGlobalStreamsPerUser int - ringReplicationFactor int - ringIngesterCount int -- expected int -+ streams int -+ expected error - }{ -+ ""both local and global limit are disabled"": { -+ maxLocalStreamsPerUser: 0, -+ maxGlobalStreamsPerUser: 0, -+ 
ringReplicationFactor: 1, -+ ringIngesterCount: 1, -+ streams: 100, -+ expected: nil, -+ }, -+ ""current number of streams is below the limit"": { -+ maxLocalStreamsPerUser: 0, -+ maxGlobalStreamsPerUser: 1000, -+ ringReplicationFactor: 3, -+ ringIngesterCount: 10, -+ streams: 299, -+ expected: nil, -+ }, -+ ""current number of streams is above the limit"": { -+ maxLocalStreamsPerUser: 0, -+ maxGlobalStreamsPerUser: 1000, -+ ringReplicationFactor: 3, -+ ringIngesterCount: 10, -+ streams: 300, -+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 300, 300, 0, 1000, 300), -+ }, - ""both local and global limits are disabled"": { - maxLocalStreamsPerUser: 0, - maxGlobalStreamsPerUser: 0, - ringReplicationFactor: 1, - ringIngesterCount: 1, -- expected: math.MaxInt32, -+ streams: math.MaxInt32 - 1, -+ expected: nil, - }, - ""only local limit is enabled"": { - maxLocalStreamsPerUser: 1000, - maxGlobalStreamsPerUser: 0, - ringReplicationFactor: 1, - ringIngesterCount: 1, -- expected: 1000, -+ streams: 3000, -+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 1000, 1000, 0, 0), - }, - ""only global limit is enabled with replication-factor=1"": { - maxLocalStreamsPerUser: 0, - maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 1, - ringIngesterCount: 10, -- expected: 100, -+ streams: 3000, -+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 100, 0, 1000, 100), - }, - ""only global limit is enabled with replication-factor=3"": { - maxLocalStreamsPerUser: 0, - maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, -- expected: 300, -+ streams: 3000, -+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 300, 0, 1000, 300), - }, - ""both local and global limits are set with local limit < global limit"": { - maxLocalStreamsPerUser: 150, - maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, -- expected: 150, -+ streams: 3000, -+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 150, 150, 1000, 300), - }, - ""both local and global limits are set with local limit > global limit"": { - maxLocalStreamsPerUser: 500, - maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, -- expected: 300, -- }, -- } -- -- for testName, testData := range tests { -- testData := testData -- -- t.Run(testName, func(t *testing.T) { -- // Mock the ring -- ring := &ringCountMock{count: testData.ringIngesterCount} -- -- // Mock limits -- limits, err := validation.NewOverrides(validation.Limits{ -- MaxLocalStreamsPerUser: testData.maxLocalStreamsPerUser, -- MaxGlobalStreamsPerUser: testData.maxGlobalStreamsPerUser, -- }, nil) -- require.NoError(t, err) -- -- limiter := NewLimiter(limits, ring, testData.ringReplicationFactor) -- actual := limiter.maxStreamsPerUser(""test"") -- -- assert.Equal(t, testData.expected, actual) -- }) -- } --} -- --func TestLimiter_AssertMaxStreamsPerUser(t *testing.T) { -- tests := map[string]struct { -- maxLocalStreamsPerUser int -- maxGlobalStreamsPerUser int -- ringReplicationFactor int -- ringIngesterCount int -- streams int -- expected error -- }{ -- ""both local and global limit are disabled"": { -- maxLocalStreamsPerUser: 0, -- maxGlobalStreamsPerUser: 0, -- ringReplicationFactor: 1, -- ringIngesterCount: 1, -- streams: 100, -- expected: nil, -- }, -- ""current number of streams is below the limit"": { -- maxLocalStreamsPerUser: 0, -- maxGlobalStreamsPerUser: 1000, -- ringReplicationFactor: 3, -- 
ringIngesterCount: 10, -- streams: 299, -- expected: nil, -- }, -- ""current number of streams is above the limit"": { -- maxLocalStreamsPerUser: 0, -- maxGlobalStreamsPerUser: 1000, -- ringReplicationFactor: 3, -- ringIngesterCount: 10, -- streams: 300, -- expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, 0, 1000, 300), -+ streams: 3000, -+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 300, 500, 1000, 300), - }, - } - -diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go -index 9293a989c17fb..97f54caa15e20 100644 ---- a/pkg/util/validation/validate.go -+++ b/pkg/util/validation/validate.go -@@ -10,6 +10,9 @@ const ( - RateLimited = ""rate_limited"" - // LineTooLong is a reason for discarding too long log lines. - LineTooLong = ""line_too_long"" -+ // StreamLimit is a reason for discarding lines when we can't create a new stream -+ // because the limit of active streams has been reached. -+ StreamLimit = ""stream_limit"" - ) - - // DiscardedBytes is a metric of the total discarded bytes, by reason.",Loki,"Improve logging and add metrics to streams dropped by stream limit (#2012) - -* Improve the log message when we drop streams because a user is hitting a stream limit. -Increment the dropped samples metrics when this happens also. - -Signed-off-by: Ed Welch - -* improving comments - -Signed-off-by: Ed Welch " -3d0236bc7140ca21279a9295ddca70ce6c1f53ef,2024-10-21 21:07:18,Christian Haudum,"chore: Fix variable declaration for Bloom Build dashboard (#14553) - -Signed-off-by: Christian Haudum ",False,"diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json -index 02aa2ee1d3416..149dfacd857d3 100644 ---- a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json -+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json -@@ -6367,7 +6367,7 @@ - ""multi"": false, - ""name"": ""tenant"", - ""options"": [ ], -- ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""})"", -+ ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""}, tenant)"", - ""refresh"": 0, - ""regex"": """", - ""sort"": 3, -diff --git a/production/loki-mixin-compiled/dashboards/loki-bloom-build.json b/production/loki-mixin-compiled/dashboards/loki-bloom-build.json -index 02aa2ee1d3416..149dfacd857d3 100644 ---- a/production/loki-mixin-compiled/dashboards/loki-bloom-build.json -+++ b/production/loki-mixin-compiled/dashboards/loki-bloom-build.json -@@ -6367,7 +6367,7 @@ - ""multi"": false, - ""name"": ""tenant"", - ""options"": [ ], -- ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""})"", -+ ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""}, tenant)"", - ""refresh"": 0, - ""regex"": """", - ""sort"": 3, -diff --git a/production/loki-mixin/dashboards/loki-bloom-build.libsonnet b/production/loki-mixin/dashboards/loki-bloom-build.libsonnet -index bce61a0e50215..5405f6ef1b602 100644 ---- a/production/loki-mixin/dashboards/loki-bloom-build.libsonnet -+++ b/production/loki-mixin/dashboards/loki-bloom-build.libsonnet -@@ -12,7 +12,7 @@ local template = import 'grafonnet/template.libsonnet'; - template.new( - 'tenant', - '$datasource', -- 'label_values(loki_bloomplanner_tenant_tasks_planned{cluster=""$cluster"", 
namespace=""$namespace""})', -+ 'label_values(loki_bloomplanner_tenant_tasks_planned{cluster=""$cluster"", namespace=""$namespace""}, tenant)', - label='Tenant', - sort=3, // numerical ascending - includeAll=true,",chore,"Fix variable declaration for Bloom Build dashboard (#14553) - -Signed-off-by: Christian Haudum " -9d84a6868b4327a243c6f26e0c5c9954402ada23,2025-02-04 19:40:12,sherinabr,fix: export ExcludedMetadataLabels so it can be extended in GEL (#16083),False,"diff --git a/pkg/util/entry_size.go b/pkg/util/entry_size.go -index 4f2c8f0bf82dc..91f0b300010a6 100644 ---- a/pkg/util/entry_size.go -+++ b/pkg/util/entry_size.go -@@ -20,12 +20,12 @@ func EntryTotalSize(entry *push.Entry) int { - return len(entry.Line) + StructuredMetadataSize(entry.StructuredMetadata) - } - --var excludedStructuredMetadataLabels = []string{constants.LevelLabel} -+var ExcludedStructuredMetadataLabels = []string{constants.LevelLabel} - - func StructuredMetadataSize(metas push.LabelsAdapter) int { - size := 0 - for _, meta := range metas { -- if slices.Contains(excludedStructuredMetadataLabels, meta.Name) { -+ if slices.Contains(ExcludedStructuredMetadataLabels, meta.Name) { - continue - } - size += len(meta.Name) + len(meta.Value)",fix,export ExcludedMetadataLabels so it can be extended in GEL (#16083) -ea6abbfd079f79dcc0d019bf1691a6a3a9803a8f,2023-03-08 02:55:54,Dylan Guedes,"Loki-Mixin: Remove query-readiness panel (#8735) - -**What this PR does / why we need it**: -Remove from loki-mixin the panel that shows the query_readiness metric. -The metric was removed a while ago.",False,"diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json b/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json -index 4cef18cdf6aab..e54223c76aa13 100644 ---- a/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json -+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json -@@ -535,83 +535,6 @@ - ""show"": false - } - ] -- }, -- { -- ""aliasColors"": { }, -- ""bars"": false, -- ""dashLength"": 10, -- ""dashes"": false, -- ""datasource"": ""$datasource"", -- ""fill"": 1, -- ""gridPos"": { }, -- ""id"": 7, -- ""legend"": { -- ""avg"": false, -- ""current"": false, -- ""max"": false, -- ""min"": false, -- ""show"": true, -- ""total"": false, -- ""values"": false -- }, -- ""lines"": true, -- ""linewidth"": 1, -- ""links"": [ ], -- ""nullPointMode"": ""null as zero"", -- ""percentage"": false, -- ""pointradius"": 5, -- ""points"": false, -- ""renderer"": ""flot"", -- ""seriesOverrides"": [ ], -- ""spaceLength"": 10, -- ""span"": 6, -- ""stack"": false, -- ""steppedLine"": false, -- ""targets"": [ -- { -- ""expr"": ""loki_boltdb_shipper_query_readiness_duration_seconds{cluster=~\""$cluster\"", namespace=~\""$namespace\""}"", -- ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""duration"", -- ""legendLink"": null, -- ""step"": 10 -- } -- ], -- ""thresholds"": [ ], -- ""timeFrom"": null, -- ""timeShift"": null, -- ""title"": ""Query Readiness Duration"", -- ""tooltip"": { -- ""shared"": true, -- ""sort"": 2, -- ""value_type"": ""individual"" -- }, -- ""type"": ""graph"", -- ""xaxis"": { -- ""buckets"": null, -- ""mode"": ""time"", -- ""name"": null, -- ""show"": true, -- ""values"": [ ] -- }, -- ""yaxes"": [ -- { -- ""format"": ""s"", -- ""label"": null, -- ""logBase"": 1, -- ""max"": null, -- ""min"": 0, -- ""show"": true -- }, -- { -- ""format"": ""short"", -- ""label"": null, -- ""logBase"": 1, -- ""max"": null, -- ""min"": 
null, -- ""show"": false -- } -- ] - } - ], - ""repeat"": null, -@@ -633,7 +556,7 @@ - ""dashes"": false, - ""datasource"": ""$datasource"", - ""fill"": 1, -- ""id"": 8, -+ ""id"": 7, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -734,7 +657,7 @@ - ""dashes"": false, - ""datasource"": ""$datasource"", - ""fill"": 1, -- ""id"": 9, -+ ""id"": 8, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -835,7 +758,7 @@ - ""dashes"": false, - ""datasource"": ""$datasource"", - ""fill"": 1, -- ""id"": 10, -+ ""id"": 9, - ""legend"": { - ""avg"": false, - ""current"": false, -diff --git a/production/loki-mixin-compiled/dashboards/loki-reads-resources.json b/production/loki-mixin-compiled/dashboards/loki-reads-resources.json -index 33fdffdcd3097..9e4ef679a8725 100644 ---- a/production/loki-mixin-compiled/dashboards/loki-reads-resources.json -+++ b/production/loki-mixin-compiled/dashboards/loki-reads-resources.json -@@ -1634,83 +1634,6 @@ - ""show"": false - } - ] -- }, -- { -- ""aliasColors"": { }, -- ""bars"": false, -- ""dashLength"": 10, -- ""dashes"": false, -- ""datasource"": ""$datasource"", -- ""fill"": 1, -- ""gridPos"": { }, -- ""id"": 19, -- ""legend"": { -- ""avg"": false, -- ""current"": false, -- ""max"": false, -- ""min"": false, -- ""show"": true, -- ""total"": false, -- ""values"": false -- }, -- ""lines"": true, -- ""linewidth"": 1, -- ""links"": [ ], -- ""nullPointMode"": ""null as zero"", -- ""percentage"": false, -- ""pointradius"": 5, -- ""points"": false, -- ""renderer"": ""flot"", -- ""seriesOverrides"": [ ], -- ""spaceLength"": 10, -- ""span"": 6, -- ""stack"": false, -- ""steppedLine"": false, -- ""targets"": [ -- { -- ""expr"": ""loki_boltdb_shipper_query_readiness_duration_seconds{cluster=~\""$cluster\"", namespace=~\""$namespace\""}"", -- ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""duration"", -- ""legendLink"": null, -- ""step"": 10 -- } -- ], -- ""thresholds"": [ ], -- ""timeFrom"": null, -- ""timeShift"": null, -- ""title"": ""Query Readiness Duration"", -- ""tooltip"": { -- ""shared"": true, -- ""sort"": 2, -- ""value_type"": ""individual"" -- }, -- ""type"": ""graph"", -- ""xaxis"": { -- ""buckets"": null, -- ""mode"": ""time"", -- ""name"": null, -- ""show"": true, -- ""values"": [ ] -- }, -- ""yaxes"": [ -- { -- ""format"": ""s"", -- ""label"": null, -- ""logBase"": 1, -- ""max"": null, -- ""min"": 0, -- ""show"": true -- }, -- { -- ""format"": ""short"", -- ""label"": null, -- ""logBase"": 1, -- ""max"": null, -- ""min"": null, -- ""show"": false -- } -- ] - } - ], - ""repeat"": null, -@@ -1732,7 +1655,7 @@ - ""dashes"": false, - ""datasource"": ""$datasource"", - ""fill"": 1, -- ""id"": 20, -+ ""id"": 19, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -1833,7 +1756,7 @@ - ""dashes"": false, - ""datasource"": ""$datasource"", - ""fill"": 1, -- ""id"": 21, -+ ""id"": 20, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -1934,7 +1857,7 @@ - ""dashes"": false, - ""datasource"": ""$datasource"", - ""fill"": 1, -- ""id"": 22, -+ ""id"": 21, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -2021,7 +1944,7 @@ - ""datasource"": ""$datasource"", - ""fill"": 1, - ""gridPos"": { }, -- ""id"": 23, -+ ""id"": 22, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -2098,7 +2021,7 @@ - ""datasource"": ""$datasource"", - ""fill"": 1, - ""gridPos"": { }, -- ""id"": 24, -+ ""id"": 23, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -2200,7 +2123,7 @@ - ""datasource"": 
""$datasource"", - ""fill"": 1, - ""gridPos"": { }, -- ""id"": 25, -+ ""id"": 24, - ""legend"": { - ""avg"": false, - ""current"": false, -@@ -2302,7 +2225,7 @@ - ""datasource"": ""$datasource"", - ""fill"": 1, - ""gridPos"": { }, -- ""id"": 26, -+ ""id"": 25, - ""legend"": { - ""avg"": false, - ""current"": false, -diff --git a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet -index c03241eeb92be..4e54760b513ba 100644 ---- a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet -+++ b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet -@@ -120,13 +120,6 @@ local utils = import 'mixin-utils/utils.libsonnet'; - .addPanel( - $.containerDiskSpaceUtilizationPanel('Disk Space Utilization', index_gateway_job_matcher), - ) -- .addPanel( -- $.panel('Query Readiness Duration') + -- $.queryPanel( -- ['loki_boltdb_shipper_query_readiness_duration_seconds{%s}' % $.namespaceMatcher()], ['duration'] -- ) + -- { yaxes: $.yaxes('s') }, -- ) - ) - .addRow( - $.row('Ingester')",unknown,"Loki-Mixin: Remove query-readiness panel (#8735) - -**What this PR does / why we need it**: -Remove from loki-mixin the panel that shows the query_readiness metric. -The metric was removed a while ago." -db438aa30911363cf1a654143e8a383ee84cb2ec,2025-03-22 02:45:31,renovate[bot],"chore(deps): update dependency eslint to v9.23.0 (main) (#16865) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json -index dfef3c1d32313..4975916689fff 100644 ---- a/pkg/ui/frontend/package-lock.json -+++ b/pkg/ui/frontend/package-lock.json -@@ -870,9 +870,9 @@ - } - }, - ""node_modules/@eslint/config-helpers"": { -- ""version"": ""0.1.0"", -- ""resolved"": ""https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.1.0.tgz"", -- ""integrity"": ""sha512-kLrdPDJE1ckPo94kmPPf9Hfd0DU0Jw6oKYrhe+pwSC0iTUInmTa+w6fw8sGgcfkFJGNdWOUeOaDM4quW4a7OkA=="", -+ ""version"": ""0.2.0"", -+ ""resolved"": ""https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.2.0.tgz"", -+ ""integrity"": ""sha512-yJLLmLexii32mGrhW29qvU3QBVTu0GUmEf/J4XsBtVhp4JkIUFN/BjWqTF63yRvGApIDpZm5fa97LtYtINmfeQ=="", - ""dev"": true, - ""license"": ""Apache-2.0"", - ""engines"": { -@@ -893,9 +893,9 @@ - } - }, - ""node_modules/@eslint/eslintrc"": { -- ""version"": ""3.3.0"", -- ""resolved"": ""https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.0.tgz"", -- ""integrity"": ""sha512-yaVPAiNAalnCZedKLdR21GOGILMLKPyqSLWaAjQFvYA2i/ciDi8ArYVr69Anohb6cH2Ukhqti4aFnYyPm8wdwQ=="", -+ ""version"": ""3.3.1"", -+ ""resolved"": ""https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz"", -+ ""integrity"": ""sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ=="", - ""dev"": true, - ""license"": ""MIT"", - ""dependencies"": { -@@ -930,9 +930,9 @@ - } - }, - ""node_modules/@eslint/js"": { -- ""version"": ""9.22.0"", -- ""resolved"": ""https://registry.npmjs.org/@eslint/js/-/js-9.22.0.tgz"", -- ""integrity"": ""sha512-vLFajx9o8d1/oL2ZkpMYbkLv8nDB6yaIwFNt7nI4+I80U/z03SxmfOMsLbvWr3p7C+Wnoh//aOu2pQW8cS0HCQ=="", -+ ""version"": ""9.23.0"", -+ ""resolved"": ""https://registry.npmjs.org/@eslint/js/-/js-9.23.0.tgz"", -+ ""integrity"": ""sha512-35MJ8vCPU0ZMxo7zfev2pypqTwWTofFZO6m4KAtdoFhRpLJUpHTZZ+KB3C7Hb1d7bULYwO4lJXGCi5Se+8OMbw=="", - ""dev"": true, - ""license"": ""MIT"", - ""engines"": { -@@ -4072,19 +4072,19 @@ - 
} - }, - ""node_modules/eslint"": { -- ""version"": ""9.22.0"", -- ""resolved"": ""https://registry.npmjs.org/eslint/-/eslint-9.22.0.tgz"", -- ""integrity"": ""sha512-9V/QURhsRN40xuHXWjV64yvrzMjcz7ZyNoF2jJFmy9j/SLk0u1OLSZgXi28MrXjymnjEGSR80WCdab3RGMDveQ=="", -+ ""version"": ""9.23.0"", -+ ""resolved"": ""https://registry.npmjs.org/eslint/-/eslint-9.23.0.tgz"", -+ ""integrity"": ""sha512-jV7AbNoFPAY1EkFYpLq5bslU9NLNO8xnEeQXwErNibVryjk67wHVmddTBilc5srIttJDBrB0eMHKZBFbSIABCw=="", - ""dev"": true, - ""license"": ""MIT"", - ""dependencies"": { - ""@eslint-community/eslint-utils"": ""^4.2.0"", - ""@eslint-community/regexpp"": ""^4.12.1"", - ""@eslint/config-array"": ""^0.19.2"", -- ""@eslint/config-helpers"": ""^0.1.0"", -+ ""@eslint/config-helpers"": ""^0.2.0"", - ""@eslint/core"": ""^0.12.0"", -- ""@eslint/eslintrc"": ""^3.3.0"", -- ""@eslint/js"": ""9.22.0"", -+ ""@eslint/eslintrc"": ""^3.3.1"", -+ ""@eslint/js"": ""9.23.0"", - ""@eslint/plugin-kit"": ""^0.2.7"", - ""@humanfs/node"": ""^0.16.6"", - ""@humanwhocodes/module-importer"": ""^1.0.1"",",chore,"update dependency eslint to v9.23.0 (main) (#16865) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -23f998a5aa71182ca791612fb2c8165c3a140efb,2019-08-19 21:54:10,Robert Fratto,ci: update apt-get before installing deps for rootless step (#914),False,"diff --git a/.circleci/config.yml b/.circleci/config.yml -index bd623920c2bda..a35601fd14e61 100644 ---- a/.circleci/config.yml -+++ b/.circleci/config.yml -@@ -64,7 +64,7 @@ workflows: - - publish/canary - - publish/docker-driver - filters: {<<: *tag-or-master} -- -+ - - test-helm: - requires: [ lint, test ] - filters: {<<: *tags} -@@ -91,7 +91,8 @@ workflows: - run: - name: rootless - command: | -- sudo apt-get install -qy uidmap libseccomp-dev binfmt-support go-bindata -+ sudo apt-get update && \ -+ sudo apt-get install -qy uidmap libseccomp-dev binfmt-support go-bindata - sudo docker run --privileged linuxkit/binfmt:v0.6 - - .img: &img",ci,update apt-get before installing deps for rootless step (#914) -21dd4afdc76d7790e177d2dd364ecf5b629c8112,2024-05-30 19:04:05,Jack Baldry,"docs: Republish the sizing calculator but don't list it in the table of contents and don't index it (#13070) - -Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/setup/size/_index.md b/docs/sources/setup/size/_index.md -index 74dcb8e504964..162748eb9e3b8 100644 ---- a/docs/sources/setup/size/_index.md -+++ b/docs/sources/setup/size/_index.md -@@ -1,4 +1,7 @@ - --- -+_build: -+ list: false -+noindex: true - title: Size the cluster - menuTitle: Size the cluster - description: Provides a tool that generates a Helm Chart values.yaml file based on expected ingestion, retention rate, and node type, to help size your Grafana deployment. -@@ -6,7 +9,6 @@ aliases: - - ../installation/sizing/ - - ../installation/helm/generate - weight: 100 --draft: true - --- - - ",docs,"Republish the sizing calculator but don't list it in the table of contents and don't index it (#13070) - -Co-authored-by: J Stickler " -7654c27c121048d9022c439779f73c44105f218d,2019-12-14 03:16:44,Cyril Tovena,"Adds configurable compression algorithms for chunks (#1411) - -* Adds L4Z encoding. - -Signed-off-by: Cyril Tovena - -* Adds encoding benchmarks - -Signed-off-by: Cyril Tovena - -* Adds snappy encoding. 
- -Signed-off-by: Cyril Tovena - -* Adds chunk size test - -Signed-off-by: Cyril Tovena - -* Adds snappy v2 - -Signed-off-by: Cyril Tovena - -* Improve benchmarks - -Signed-off-by: Cyril Tovena - -* Remove chunkenc - -Signed-off-by: Cyril Tovena - -* Update lz4 to latest master version. - -Signed-off-by: Peter Štibraný - -* Use temporary buffer in serialise method to avoid allocations when doing string -> byte conversion. -It also makes code little more readable. We pool those buffers for reuse. - -Signed-off-by: Peter Štibraný - -* Added gzip -1 for comparison. - -Signed-off-by: Peter Štibraný - -* Initialize reader and buffered reader lazily. - -This helps with reader/buffered reader reuse. - -Signed-off-by: Peter Štibraný - -* Don't keep entries, extracted generateData function - -(mostly to get more understandable profile) - -Signed-off-by: Peter Štibraný - -* Improve test and benchmark to cover all encodings. - -Signed-off-by: Cyril Tovena - -* Adds support for a new chunk format with encoding info. - -Signed-off-by: Cyril Tovena - -* Ingesters now support encoding config. - -Signed-off-by: Cyril Tovena - -* Add support for no compression. - -Signed-off-by: Cyril Tovena - -* Add docs - -Signed-off-by: Cyril Tovena - -* Remove default Gzip for ByteChunk. - -Signed-off-by: Cyril Tovena - -* Removes none, snappyv2 and gzip-1 - -Signed-off-by: Cyril Tovena - -* Move log test lines to testdata and add supported encoding stringer - -Signed-off-by: Cyril Tovena - -* got linted - -Signed-off-by: Cyril Tovena ",False,"diff --git a/docs/configuration/README.md b/docs/configuration/README.md -index 0d4633be0d46c..46b2f399734b1 100644 ---- a/docs/configuration/README.md -+++ b/docs/configuration/README.md -@@ -268,7 +268,7 @@ The `ingester_config` block configures Ingesters. - [chunk_idle_period: | default = 30m] - - # The targeted _uncompressed_ size in bytes of a chunk block --# When this threshold is exceeded the head block will be cut and compressed inside the chunk -+# When this threshold is exceeded the head block will be cut and compressed inside the chunk - [chunk_block_size: | default = 262144] - - # A target _compressed_ size in bytes for chunks. -@@ -277,6 +277,13 @@ The `ingester_config` block configures Ingesters. - # The default value of 0 for this will create chunks with a fixed 10 blocks, - # A non zero value will create chunks with a variable number of blocks to meet the target size. - [chunk_target_size: | default = 0] -+ -+# The compression algorithm to use for chunks. (supported: gzip, gzip-1, lz4, none, snappy, snappyv2) -+# You should choose your algorithm depending on your need: -+# - `gzip` highest compression ratio but also slowest decompression speed. 
(144 kB per chunk) -+# - `lz4` fastest compression speed (188 kB per chunk) -+# - `snappy` fast and popular compression algorithm (272 kB per chunk) -+[chunk_encoding: | default = gzip] - ``` - - ### lifecycler_config -diff --git a/go.mod b/go.mod -index a20367fe1e6df..3ec811d3ca5d6 100644 ---- a/go.mod -+++ b/go.mod -@@ -16,8 +16,10 @@ require ( - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect - github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 -+ github.com/dustin/go-humanize v1.0.0 - github.com/fatih/color v1.7.0 - github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c -+ github.com/frankban/quicktest v1.7.2 // indirect - github.com/go-kit/kit v0.9.0 - github.com/gocql/gocql v0.0.0-20181124151448-70385f88b28b // indirect - github.com/gogo/protobuf v1.3.0 // remember to update loki-build-image/Dockerfile too -@@ -31,14 +33,14 @@ require ( - github.com/influxdata/go-syslog/v2 v2.0.1 - github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af - github.com/json-iterator/go v1.1.7 -- github.com/klauspost/compress v1.7.4 -- github.com/klauspost/cpuid v1.2.1 // indirect -+ github.com/klauspost/compress v1.9.4 - github.com/mitchellh/mapstructure v1.1.2 - github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opentracing/opentracing-go v1.1.0 -+ github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible - github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v1.1.0 - github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 -diff --git a/go.sum b/go.sum -index cb6a1b5c22241..b5caaf5fda951 100644 ---- a/go.sum -+++ b/go.sum -@@ -159,6 +159,8 @@ github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+p - github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= - github.com/fluent/fluent-logger-golang v1.2.1/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= - github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -+github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= -+github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= - github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= - github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= - github.com/fsouza/fake-gcs-server v1.3.0 h1:f2mbomatUsbw8NRY7rzqiiWNn4BRM+Jredz0Pt70Usg= -@@ -394,10 +396,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V - github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= - github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= - github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= --github.com/klauspost/compress v1.7.4 h1:4UqAIzZ1Ns2epCTyJ1d2xMWvxtX+FNSCYWeOFogK9nc= --github.com/klauspost/compress v1.7.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= --github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= --github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 
-+github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4= -+github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= - github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= - github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= - github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -@@ -506,6 +506,8 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T - github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= - github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= - github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -+github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible h1:5isCJDRADbeSlWx6KVXAYwrcihyCGVXr7GNCdLEVDr8= -+github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= - github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= - github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= - github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -diff --git a/pkg/chunkenc/facade.go b/pkg/chunkenc/facade.go -index 8556b0fdd0442..d603920e3e653 100644 ---- a/pkg/chunkenc/facade.go -+++ b/pkg/chunkenc/facade.go -@@ -7,13 +7,18 @@ import ( - ) - - // GzipLogChunk is a cortex encoding type for our chunks. -+// Deprecated: the chunk encoding/compression format is inside the chunk data. - const GzipLogChunk = encoding.Encoding(128) - -+// LogChunk is a cortex encoding type for our chunks. -+const LogChunk = encoding.Encoding(129) -+ - func init() { - encoding.MustRegisterEncoding(GzipLogChunk, ""GzipLogChunk"", func() encoding.Chunk { -- return &Facade{ -- c: NewMemChunk(EncGZIP), -- } -+ return &Facade{} -+ }) -+ encoding.MustRegisterEncoding(LogChunk, ""LogChunk"", func() encoding.Chunk { -+ return &Facade{} - }) - } - -@@ -32,6 +37,9 @@ func NewFacade(c Chunk) encoding.Chunk { - - // Marshal implements encoding.Chunk. - func (f Facade) Marshal(w io.Writer) error { -+ if f.c == nil { -+ return nil -+ } - buf, err := f.c.Bytes() - if err != nil { - return err -@@ -49,11 +57,14 @@ func (f *Facade) UnmarshalFromBuf(buf []byte) error { - - // Encoding implements encoding.Chunk. - func (Facade) Encoding() encoding.Encoding { -- return GzipLogChunk -+ return LogChunk - } - - // Utilization implements encoding.Chunk. 
- func (f Facade) Utilization() float64 { -+ if f.c == nil { -+ return 0 -+ } - return f.c.Utilization() - } - -@@ -66,7 +77,7 @@ func (f Facade) LokiChunk() Chunk { - func UncompressedSize(c encoding.Chunk) (int, bool) { - f, ok := c.(*Facade) - -- if !ok { -+ if !ok || f.c == nil { - return 0, false - } - -diff --git a/pkg/chunkenc/gzip_test.go b/pkg/chunkenc/gzip_test.go -deleted file mode 100644 -index 7cebc3d1373be..0000000000000 ---- a/pkg/chunkenc/gzip_test.go -+++ /dev/null -@@ -1,396 +0,0 @@ --package chunkenc -- --import ( -- ""bytes"" -- ""fmt"" -- ""math"" -- ""math/rand"" -- ""sync"" -- ""testing"" -- ""time"" -- -- ""github.com/stretchr/testify/assert"" -- -- ""github.com/stretchr/testify/require"" -- -- ""github.com/grafana/loki/pkg/logproto"" --) -- --func TestGZIPBlock(t *testing.T) { -- chk := NewMemChunk(EncGZIP) -- -- cases := []struct { -- ts int64 -- str string -- cut bool -- }{ -- { -- ts: 1, -- str: ""hello, world!"", -- }, -- { -- ts: 2, -- str: ""hello, world2!"", -- }, -- { -- ts: 3, -- str: ""hello, world3!"", -- }, -- { -- ts: 4, -- str: ""hello, world4!"", -- }, -- { -- ts: 5, -- str: ""hello, world5!"", -- }, -- { -- ts: 6, -- str: ""hello, world6!"", -- cut: true, -- }, -- { -- ts: 7, -- str: ""hello, world7!"", -- }, -- { -- ts: 8, -- str: ""hello, worl\nd8!"", -- }, -- { -- ts: 8, -- str: ""hello, world 8, 2!"", -- }, -- { -- ts: 8, -- str: ""hello, world 8, 3!"", -- }, -- { -- ts: 9, -- str: """", -- }, -- } -- -- for _, c := range cases { -- require.NoError(t, chk.Append(logprotoEntry(c.ts, c.str))) -- if c.cut { -- require.NoError(t, chk.cut()) -- } -- } -- -- it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) -- require.NoError(t, err) -- -- idx := 0 -- for it.Next() { -- e := it.Entry() -- require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) -- require.Equal(t, cases[idx].str, e.Line) -- idx++ -- } -- -- require.NoError(t, it.Error()) -- require.Equal(t, len(cases), idx) -- -- t.Run(""bounded-iteration"", func(t *testing.T) { -- it, err := chk.Iterator(time.Unix(0, 3), time.Unix(0, 7), logproto.FORWARD, nil) -- require.NoError(t, err) -- -- idx := 2 -- for it.Next() { -- e := it.Entry() -- require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) -- require.Equal(t, cases[idx].str, e.Line) -- idx++ -- } -- require.NoError(t, it.Error()) -- require.Equal(t, 6, idx) -- }) --} -- --func TestGZIPSerialisation(t *testing.T) { -- chk := NewMemChunk(EncGZIP) -- -- numSamples := 500000 -- -- for i := 0; i < numSamples; i++ { -- require.NoError(t, chk.Append(logprotoEntry(int64(i), string(i)))) -- } -- -- byt, err := chk.Bytes() -- require.NoError(t, err) -- -- bc, err := NewByteChunk(byt) -- require.NoError(t, err) -- -- it, err := bc.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) -- require.NoError(t, err) -- for i := 0; i < numSamples; i++ { -- require.True(t, it.Next()) -- -- e := it.Entry() -- require.Equal(t, int64(i), e.Timestamp.UnixNano()) -- require.Equal(t, string(i), e.Line) -- } -- -- require.NoError(t, it.Error()) -- -- byt2, err := chk.Bytes() -- require.NoError(t, err) -- -- require.True(t, bytes.Equal(byt, byt2)) --} -- --func TestGZIPChunkFilling(t *testing.T) { -- chk := NewMemChunk(EncGZIP) -- chk.blockSize = 1024 -- -- // We should be able to append only 10KB of logs. 
-- maxBytes := chk.blockSize * blocksPerChunk -- lineSize := 512 -- lines := maxBytes / lineSize -- -- logLine := string(make([]byte, lineSize)) -- entry := &logproto.Entry{ -- Timestamp: time.Unix(0, 0), -- Line: logLine, -- } -- -- i := int64(0) -- for ; chk.SpaceFor(entry) && i < 30; i++ { -- entry.Timestamp = time.Unix(0, i) -- require.NoError(t, chk.Append(entry)) -- } -- -- require.Equal(t, int64(lines), i) -- -- it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, nil) -- require.NoError(t, err) -- i = 0 -- for it.Next() { -- entry := it.Entry() -- require.Equal(t, i, entry.Timestamp.UnixNano()) -- i++ -- } -- -- require.Equal(t, int64(lines), i) --} -- --func TestGZIPChunkTargetSize(t *testing.T) { -- targetSize := 1024 * 1024 -- chk := NewMemChunkSize(EncGZIP, 1024, targetSize) -- -- lineSize := 512 -- entry := &logproto.Entry{ -- Timestamp: time.Unix(0, 0), -- Line: """", -- } -- -- // Use a random number to generate random log data, otherwise the gzip compression is way too good -- // and the following loop has to run waaayyyyy to many times -- // Using the same seed should guarantee the same random numbers and same test data. -- r := rand.New(rand.NewSource(99)) -- -- i := int64(0) -- -- for ; chk.SpaceFor(entry) && i < 5000; i++ { -- logLine := make([]byte, lineSize) -- for j := range logLine { -- logLine[j] = byte(r.Int()) -- } -- entry = &logproto.Entry{ -- Timestamp: time.Unix(0, 0), -- Line: string(logLine), -- } -- entry.Timestamp = time.Unix(0, i) -- require.NoError(t, chk.Append(entry)) -- } -- -- // 5000 is a limit ot make sure the test doesn't run away, we shouldn't need this many log lines to make 1MB chunk -- require.NotEqual(t, 5000, i) -- -- require.NoError(t, chk.Close()) -- -- require.Equal(t, 0, chk.head.size) -- -- // Even though the seed is static above and results should be deterministic, -- // we will allow +/- 10% variance -- minSize := int(float64(targetSize) * 0.9) -- maxSize := int(float64(targetSize) * 1.1) -- require.Greater(t, chk.CompressedSize(), minSize) -- require.Less(t, chk.CompressedSize(), maxSize) -- -- // Also verify our utilization is close to 1.0 -- ut := chk.Utilization() -- require.Greater(t, ut, 0.99) -- require.Less(t, ut, 1.01) -- --} -- --func TestMemChunk_AppendOutOfOrder(t *testing.T) { -- t.Parallel() -- -- type tester func(t *testing.T, chk *MemChunk) -- -- tests := map[string]tester{ -- ""append out of order in the same block"": func(t *testing.T, chk *MemChunk) { -- assert.NoError(t, chk.Append(logprotoEntry(5, ""test""))) -- assert.NoError(t, chk.Append(logprotoEntry(6, ""test""))) -- -- assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error()) -- }, -- ""append out of order in a new block right after cutting the previous one"": func(t *testing.T, chk *MemChunk) { -- assert.NoError(t, chk.Append(logprotoEntry(5, ""test""))) -- assert.NoError(t, chk.Append(logprotoEntry(6, ""test""))) -- assert.NoError(t, chk.cut()) -- -- assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error()) -- }, -- ""append out of order in a new block after multiple cuts"": func(t *testing.T, chk *MemChunk) { -- assert.NoError(t, chk.Append(logprotoEntry(5, ""test""))) -- assert.NoError(t, chk.cut()) -- -- assert.NoError(t, chk.Append(logprotoEntry(6, ""test""))) -- assert.NoError(t, chk.cut()) -- -- assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error()) -- }, -- } -- -- for testName, tester := range tests { -- tester := tester -- -- t.Run(testName, 
func(t *testing.T) { -- t.Parallel() -- -- tester(t, NewMemChunk(EncGZIP)) -- }) -- } --} -- --var result []Chunk -- --func BenchmarkWriteGZIP(b *testing.B) { -- chunks := []Chunk{} -- -- entry := &logproto.Entry{ -- Timestamp: time.Unix(0, 0), -- Line: RandString(512), -- } -- i := int64(0) -- -- for n := 0; n < b.N; n++ { -- c := NewMemChunk(EncGZIP) -- // adds until full so we trigger cut which serialize using gzip -- for c.SpaceFor(entry) { -- _ = c.Append(entry) -- entry.Timestamp = time.Unix(0, i) -- i++ -- } -- chunks = append(chunks, c) -- } -- result = chunks --} -- --func BenchmarkReadGZIP(b *testing.B) { -- chunks := []Chunk{} -- i := int64(0) -- for n := 0; n < 50; n++ { -- entry := randSizeEntry(0) -- c := NewMemChunk(EncGZIP) -- // adds until full so we trigger cut which serialize using gzip -- for c.SpaceFor(entry) { -- _ = c.Append(entry) -- i++ -- entry = randSizeEntry(i) -- } -- c.Close() -- chunks = append(chunks, c) -- } -- entries := []logproto.Entry{} -- b.ResetTimer() -- for n := 0; n < b.N; n++ { -- var wg sync.WaitGroup -- for _, c := range chunks { -- wg.Add(1) -- go func(c Chunk) { -- iterator, err := c.Iterator(time.Unix(0, 0), time.Now(), logproto.BACKWARD, nil) -- if err != nil { -- panic(err) -- } -- for iterator.Next() { -- entries = append(entries, iterator.Entry()) -- } -- iterator.Close() -- wg.Done() -- }(c) -- } -- wg.Wait() -- } --} -- --func BenchmarkHeadBlockIterator(b *testing.B) { -- -- for _, j := range []int{100000, 50000, 15000, 10000} { -- b.Run(fmt.Sprintf(""Size %d"", j), func(b *testing.B) { -- -- h := headBlock{} -- -- for i := 0; i < j; i++ { -- if err := h.append(int64(i), ""this is the append string""); err != nil { -- b.Fatal(err) -- } -- } -- -- b.ResetTimer() -- -- for n := 0; n < b.N; n++ { -- iter := h.iterator(0, math.MaxInt64, nil) -- -- for iter.Next() { -- _ = iter.Entry() -- } -- } -- }) -- } --} -- --func randSizeEntry(ts int64) *logproto.Entry { -- var line string -- switch ts % 10 { -- case 0: -- line = RandString(27000) -- case 1: -- line = RandString(10000) -- case 2, 3, 4, 5: -- line = RandString(2048) -- default: -- line = RandString(4096) -- } -- return &logproto.Entry{ -- Timestamp: time.Unix(0, ts), -- Line: line, -- } --} -- --const charset = ""abcdefghijklmnopqrstuvwxyz"" + -- ""ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"" -- --func RandStringWithCharset(length int, charset string) string { -- b := make([]byte, length) -- for i := range b { -- b[i] = charset[rand.Intn(len(charset)-1)] -- } -- return string(b) --} -- --func RandString(length int) string { -- return RandStringWithCharset(length, charset) --} -- --func logprotoEntry(ts int64, line string) *logproto.Entry { -- return &logproto.Entry{ -- Timestamp: time.Unix(0, ts), -- Line: line, -- } --} -diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go -index b9446b45a95e1..bec5966116b9d 100644 ---- a/pkg/chunkenc/interface.go -+++ b/pkg/chunkenc/interface.go -@@ -2,7 +2,8 @@ package chunkenc - - import ( - ""errors"" -- ""io"" -+ ""fmt"" -+ ""strings"" - ""time"" - - ""github.com/grafana/loki/pkg/iter"" -@@ -20,15 +21,23 @@ var ( - ) - - // Encoding is the identifier for a chunk encoding. --type Encoding uint8 -+type Encoding byte - - // The different available encodings. 
- const ( - EncNone Encoding = iota - EncGZIP - EncDumb -+ EncLZ4 -+ EncSnappy - ) - -+var supportedEncoding = []Encoding{ -+ EncGZIP, -+ EncLZ4, -+ EncSnappy, -+} -+ - func (e Encoding) String() string { - switch e { - case EncGZIP: -@@ -37,11 +46,38 @@ func (e Encoding) String() string { - return ""none"" - case EncDumb: - return ""dumb"" -+ case EncLZ4: -+ return ""lz4"" -+ case EncSnappy: -+ return ""snappy"" - default: - return ""unknown"" - } - } - -+// ParseEncoding parses an chunk encoding (compression algorithm) by its name. -+func ParseEncoding(enc string) (Encoding, error) { -+ for _, e := range supportedEncoding { -+ if strings.EqualFold(e.String(), enc) { -+ return e, nil -+ } -+ } -+ return 0, fmt.Errorf(""invalid encoding: %s, supported: %s"", enc, SupportedEncoding()) -+ -+} -+ -+// SupportedEncoding returns the list of supported Encoding. -+func SupportedEncoding() string { -+ var sb strings.Builder -+ for i := range supportedEncoding { -+ sb.WriteString(supportedEncoding[i].String()) -+ if i != len(supportedEncoding)-1 { -+ sb.WriteString("", "") -+ } -+ } -+ return sb.String() -+} -+ - // Chunk is the interface for the compressed logs chunk format. - type Chunk interface { - Bounds() (time.Time, time.Time) -@@ -56,17 +92,3 @@ type Chunk interface { - CompressedSize() int - Close() error - } -- --// CompressionWriter is the writer that compresses the data passed to it. --type CompressionWriter interface { -- Write(p []byte) (int, error) -- Close() error -- Flush() error -- Reset(w io.Writer) --} -- --// CompressionReader reads the compressed data. --type CompressionReader interface { -- Read(p []byte) (int, error) -- Reset(r io.Reader) error --} -diff --git a/pkg/chunkenc/interface_test.go b/pkg/chunkenc/interface_test.go -new file mode 100644 -index 0000000000000..abb61a673eeb4 ---- /dev/null -+++ b/pkg/chunkenc/interface_test.go -@@ -0,0 +1,26 @@ -+package chunkenc -+ -+import ""testing"" -+ -+func TestParseEncoding(t *testing.T) { -+ tests := []struct { -+ enc string -+ want Encoding -+ wantErr bool -+ }{ -+ {""gzip"", EncGZIP, false}, -+ {""bad"", 0, true}, -+ } -+ for _, tt := range tests { -+ t.Run(tt.enc, func(t *testing.T) { -+ got, err := ParseEncoding(tt.enc) -+ if (err != nil) != tt.wantErr { -+ t.Errorf(""ParseEncoding() error = %v, wantErr %v"", err, tt.wantErr) -+ return -+ } -+ if got != tt.want { -+ t.Errorf(""ParseEncoding() = %v, want %v"", got, tt.want) -+ } -+ }) -+ } -+} -diff --git a/pkg/chunkenc/gzip.go b/pkg/chunkenc/memchunk.go -similarity index 85% -rename from pkg/chunkenc/gzip.go -rename to pkg/chunkenc/memchunk.go -index 2132205c523ff..cbf10b577868c 100644 ---- a/pkg/chunkenc/gzip.go -+++ b/pkg/chunkenc/memchunk.go -@@ -23,6 +23,7 @@ var ( - magicNumber = uint32(0x12EE56A) - - chunkFormatV1 = byte(1) -+ chunkFormatV2 = byte(2) - ) - - // The table gets initialized with sync.Once but may still cause a race -@@ -55,8 +56,12 @@ type MemChunk struct { - // Current in-mem block being appended to. 
- head *headBlock - -+ // the chunk format default to v2 -+ format byte - encoding Encoding -- cPool CompressionPool -+ -+ readers ReaderPool -+ writers WriterPool - } - - type block struct { -@@ -99,32 +104,34 @@ func (hb *headBlock) append(ts int64, line string) error { - return nil - } - --func (hb *headBlock) serialise(pool CompressionPool) ([]byte, error) { -- buf := &bytes.Buffer{} -+func (hb *headBlock) serialise(pool WriterPool) ([]byte, error) { -+ inBuf := serializeBytesBufferPool.Get().(*bytes.Buffer) -+ outBuf := &bytes.Buffer{} -+ - encBuf := make([]byte, binary.MaxVarintLen64) -- compressedWriter := pool.GetWriter(buf) -+ compressedWriter := pool.GetWriter(outBuf) - for _, logEntry := range hb.entries { - n := binary.PutVarint(encBuf, logEntry.t) -- _, err := compressedWriter.Write(encBuf[:n]) -- if err != nil { -- return nil, errors.Wrap(err, ""appending entry"") -- } -+ inBuf.Write(encBuf[:n]) - - n = binary.PutUvarint(encBuf, uint64(len(logEntry.s))) -- _, err = compressedWriter.Write(encBuf[:n]) -- if err != nil { -- return nil, errors.Wrap(err, ""appending entry"") -- } -- _, err = compressedWriter.Write([]byte(logEntry.s)) -- if err != nil { -- return nil, errors.Wrap(err, ""appending entry"") -- } -+ inBuf.Write(encBuf[:n]) -+ -+ inBuf.WriteString(logEntry.s) -+ } -+ -+ if _, err := compressedWriter.Write(inBuf.Bytes()); err != nil { -+ return nil, errors.Wrap(err, ""appending entry"") - } - if err := compressedWriter.Close(); err != nil { - return nil, errors.Wrap(err, ""flushing pending compress buffer"") - } -+ -+ inBuf.Reset() -+ serializeBytesBufferPool.Put(inBuf) -+ - pool.PutWriter(compressedWriter) -- return buf.Bytes(), nil -+ return outBuf.Bytes(), nil - } - - type entry struct { -@@ -132,6 +139,11 @@ type entry struct { - s string - } - -+// NewMemChunk returns a new in-mem chunk for query. -+func NewMemChunk(enc Encoding) *MemChunk { -+ return NewMemChunkSize(enc, 256*1024, 0) -+} -+ - // NewMemChunkSize returns a new in-mem chunk. - // Mainly for config push size. - func NewMemChunkSize(enc Encoding, blockSize, targetSize int) *MemChunk { -@@ -140,34 +152,22 @@ func NewMemChunkSize(enc Encoding, blockSize, targetSize int) *MemChunk { - targetSize: targetSize, // Desired chunk size in compressed bytes - blocks: []block{}, - -- head: &headBlock{}, -+ head: &headBlock{}, -+ format: chunkFormatV2, - - encoding: enc, -- } -- -- switch enc { -- case EncGZIP: -- c.cPool = &Gzip -- default: -- panic(""unknown encoding"") -+ writers: getWriterPool(enc), -+ readers: getReaderPool(enc), - } - - return c - } - --// NewMemChunk returns a new in-mem chunk for query. --func NewMemChunk(enc Encoding) *MemChunk { -- return NewMemChunkSize(enc, 256*1024, 0) --} -- - // NewByteChunk returns a MemChunk on the passed bytes. - func NewByteChunk(b []byte) (*MemChunk, error) { - bc := &MemChunk{ -- cPool: &Gzip, -- encoding: EncGZIP, -- head: &headBlock{}, // Dummy, empty headblock. -+ head: &headBlock{}, // Dummy, empty headblock. - } -- - db := decbuf{b: b} - - // Verify the header. -@@ -178,7 +178,18 @@ func NewByteChunk(b []byte) (*MemChunk, error) { - if m != magicNumber { - return nil, errors.Errorf(""invalid magic number %x"", m) - } -- if version != 1 { -+ bc.format = version -+ switch version { -+ case chunkFormatV1: -+ bc.readers, bc.writers = &Gzip, &Gzip -+ case chunkFormatV2: -+ // format v2 has a byte for block encoding. 
-+ enc := Encoding(db.byte()) -+ if db.err() != nil { -+ return nil, errors.Wrap(db.err(), ""verifying encoding"") -+ } -+ bc.readers, bc.writers = getReaderPool(enc), getWriterPool(enc) -+ default: - return nil, errors.Errorf(""invalid version %d"", version) - } - -@@ -242,7 +253,11 @@ func (c *MemChunk) Bytes() ([]byte, error) { - - // Write the header (magicNum + version). - eb.putBE32(magicNumber) -- eb.putByte(chunkFormatV1) -+ eb.putByte(c.format) -+ if c.format == chunkFormatV2 { -+ // chunk format v2 has a byte for encoding. -+ eb.putByte(byte(c.encoding)) -+ } - - n, err := buf.Write(eb.get()) - if err != nil { -@@ -401,7 +416,7 @@ func (c *MemChunk) cut() error { - return nil - } - -- b, err := c.head.serialise(c.cPool) -+ b, err := c.head.serialise(c.writers) - if err != nil { - return err - } -@@ -451,7 +466,7 @@ func (c *MemChunk) Iterator(mintT, maxtT time.Time, direction logproto.Direction - - for _, b := range c.blocks { - if maxt > b.mint && b.maxt > mint { -- its = append(its, b.iterator(c.cPool, filter)) -+ its = append(its, b.iterator(c.readers, filter)) - } - } - -@@ -472,7 +487,7 @@ func (c *MemChunk) Iterator(mintT, maxtT time.Time, direction logproto.Direction - return iter.NewEntryIteratorBackward(iterForward) - } - --func (b block) iterator(pool CompressionPool, filter logql.Filter) iter.EntryIterator { -+func (b block) iterator(pool ReaderPool, filter logql.Filter) iter.EntryIterator { - if len(b.b) == 0 { - return emptyIterator - } -@@ -537,9 +552,11 @@ func (li *listIterator) Close() error { return nil } - func (li *listIterator) Labels() string { return """" } - - type bufferedIterator struct { -- s *bufio.Reader -- reader CompressionReader -- pool CompressionPool -+ origBytes []byte -+ -+ bufReader *bufio.Reader -+ reader io.Reader -+ pool ReaderPool - - cur logproto.Entry - -@@ -553,18 +570,24 @@ type bufferedIterator struct { - filter logql.Filter - } - --func newBufferedIterator(pool CompressionPool, b []byte, filter logql.Filter) *bufferedIterator { -- r := pool.GetReader(bytes.NewBuffer(b)) -+func newBufferedIterator(pool ReaderPool, b []byte, filter logql.Filter) *bufferedIterator { - return &bufferedIterator{ -- s: BufReaderPool.Get(r), -- reader: r, -- pool: pool, -- filter: filter, -- decBuf: make([]byte, binary.MaxVarintLen64), -+ origBytes: b, -+ reader: nil, // will be initialized later -+ bufReader: nil, // will be initialized later -+ pool: pool, -+ filter: filter, -+ decBuf: make([]byte, binary.MaxVarintLen64), - } - } - - func (si *bufferedIterator) Next() bool { -+ if !si.closed && si.reader == nil { -+ // initialize reader now, hopefully reusing one of the previous readers -+ si.reader = si.pool.GetReader(bytes.NewBuffer(si.origBytes)) -+ si.bufReader = BufReaderPool.Get(si.reader) -+ } -+ - for { - ts, line, ok := si.moveNext() - if !ok { -@@ -582,7 +605,7 @@ func (si *bufferedIterator) Next() bool { - - // moveNext moves the buffer to the next entry - func (si *bufferedIterator) moveNext() (int64, []byte, bool) { -- ts, err := binary.ReadVarint(si.s) -+ ts, err := binary.ReadVarint(si.bufReader) - if err != nil { - if err != io.EOF { - si.err = err -@@ -590,7 +613,7 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) { - return 0, nil, false - } - -- l, err := binary.ReadUvarint(si.s) -+ l, err := binary.ReadUvarint(si.bufReader) - if err != nil { - if err != io.EOF { - si.err = err -@@ -612,13 +635,13 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) { - } - - // Then process reading the line. 
-- n, err := si.s.Read(si.buf[:lineSize]) -+ n, err := si.bufReader.Read(si.buf[:lineSize]) - if err != nil && err != io.EOF { - si.err = err - return 0, nil, false - } - for n < lineSize { -- r, err := si.s.Read(si.buf[n:lineSize]) -+ r, err := si.bufReader.Read(si.buf[n:lineSize]) - if err != nil { - si.err = err - return 0, nil, false -@@ -638,11 +661,12 @@ func (si *bufferedIterator) Close() error { - if !si.closed { - si.closed = true - si.pool.PutReader(si.reader) -- BufReaderPool.Put(si.s) -+ BufReaderPool.Put(si.bufReader) - if si.buf != nil { - BytesBufferPool.Put(si.buf) - } -- si.s = nil -+ si.origBytes = nil -+ si.bufReader = nil - si.buf = nil - si.decBuf = nil - si.reader = nil -diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go -new file mode 100644 -index 0000000000000..433d478a5d070 ---- /dev/null -+++ b/pkg/chunkenc/memchunk_test.go -@@ -0,0 +1,418 @@ -+package chunkenc -+ -+import ( -+ ""bytes"" -+ ""fmt"" -+ ""math"" -+ ""math/rand"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/stretchr/testify/assert"" -+ -+ ""github.com/dustin/go-humanize"" -+ ""github.com/grafana/loki/pkg/chunkenc/testdata"" -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/stretchr/testify/require"" -+) -+ -+var testEncoding = []Encoding{ -+ EncNone, -+ EncGZIP, -+ EncLZ4, -+ EncSnappy, -+} -+ -+func TestBlock(t *testing.T) { -+ for _, enc := range testEncoding { -+ t.Run(enc.String(), func(t *testing.T) { -+ chk := NewMemChunk(enc) -+ cases := []struct { -+ ts int64 -+ str string -+ cut bool -+ }{ -+ { -+ ts: 1, -+ str: ""hello, world!"", -+ }, -+ { -+ ts: 2, -+ str: ""hello, world2!"", -+ }, -+ { -+ ts: 3, -+ str: ""hello, world3!"", -+ }, -+ { -+ ts: 4, -+ str: ""hello, world4!"", -+ }, -+ { -+ ts: 5, -+ str: ""hello, world5!"", -+ }, -+ { -+ ts: 6, -+ str: ""hello, world6!"", -+ cut: true, -+ }, -+ { -+ ts: 7, -+ str: ""hello, world7!"", -+ }, -+ { -+ ts: 8, -+ str: ""hello, worl\nd8!"", -+ }, -+ { -+ ts: 8, -+ str: ""hello, world 8, 2!"", -+ }, -+ { -+ ts: 8, -+ str: ""hello, world 8, 3!"", -+ }, -+ { -+ ts: 9, -+ str: """", -+ }, -+ } -+ -+ for _, c := range cases { -+ require.NoError(t, chk.Append(logprotoEntry(c.ts, c.str))) -+ if c.cut { -+ require.NoError(t, chk.cut()) -+ } -+ } -+ -+ it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) -+ require.NoError(t, err) -+ -+ idx := 0 -+ for it.Next() { -+ e := it.Entry() -+ require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) -+ require.Equal(t, cases[idx].str, e.Line) -+ idx++ -+ } -+ -+ require.NoError(t, it.Error()) -+ require.Equal(t, len(cases), idx) -+ -+ t.Run(""bounded-iteration"", func(t *testing.T) { -+ it, err := chk.Iterator(time.Unix(0, 3), time.Unix(0, 7), logproto.FORWARD, nil) -+ require.NoError(t, err) -+ -+ idx := 2 -+ for it.Next() { -+ e := it.Entry() -+ require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) -+ require.Equal(t, cases[idx].str, e.Line) -+ idx++ -+ } -+ require.NoError(t, it.Error()) -+ require.Equal(t, 6, idx) -+ }) -+ }) -+ } -+} -+ -+func TestReadFormatV1(t *testing.T) { -+ c := NewMemChunk(EncGZIP) -+ fillChunk(c) -+ // overrides default v2 format -+ c.format = chunkFormatV1 -+ -+ b, err := c.Bytes() -+ if err != nil { -+ t.Fatal(err) -+ } -+ -+ r, err := NewByteChunk(b) -+ if err != nil { -+ t.Fatal(err) -+ } -+ -+ it, err := r.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) -+ if err != nil { -+ t.Fatal(err) -+ } -+ -+ i := int64(0) -+ for it.Next() { -+ require.Equal(t, i, 
it.Entry().Timestamp.UnixNano()) -+ require.Equal(t, testdata.LogString(i), it.Entry().Line) -+ -+ i++ -+ } -+} -+ -+func TestSerialization(t *testing.T) { -+ for _, enc := range testEncoding { -+ t.Run(enc.String(), func(t *testing.T) { -+ chk := NewMemChunk(enc) -+ -+ numSamples := 500000 -+ -+ for i := 0; i < numSamples; i++ { -+ require.NoError(t, chk.Append(logprotoEntry(int64(i), string(i)))) -+ } -+ -+ byt, err := chk.Bytes() -+ require.NoError(t, err) -+ -+ bc, err := NewByteChunk(byt) -+ require.NoError(t, err) -+ -+ it, err := bc.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) -+ require.NoError(t, err) -+ for i := 0; i < numSamples; i++ { -+ require.True(t, it.Next()) -+ -+ e := it.Entry() -+ require.Equal(t, int64(i), e.Timestamp.UnixNano()) -+ require.Equal(t, string(i), e.Line) -+ } -+ -+ require.NoError(t, it.Error()) -+ -+ byt2, err := chk.Bytes() -+ require.NoError(t, err) -+ -+ require.True(t, bytes.Equal(byt, byt2)) -+ }) -+ } -+} -+ -+func TestChunkFilling(t *testing.T) { -+ for _, enc := range testEncoding { -+ t.Run(enc.String(), func(t *testing.T) { -+ chk := NewMemChunk(enc) -+ chk.blockSize = 1024 -+ -+ // We should be able to append only 10KB of logs. -+ maxBytes := chk.blockSize * blocksPerChunk -+ lineSize := 512 -+ lines := maxBytes / lineSize -+ -+ logLine := string(make([]byte, lineSize)) -+ entry := &logproto.Entry{ -+ Timestamp: time.Unix(0, 0), -+ Line: logLine, -+ } -+ -+ i := int64(0) -+ for ; chk.SpaceFor(entry) && i < 30; i++ { -+ entry.Timestamp = time.Unix(0, i) -+ require.NoError(t, chk.Append(entry)) -+ } -+ -+ require.Equal(t, int64(lines), i) -+ -+ it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, nil) -+ require.NoError(t, err) -+ i = 0 -+ for it.Next() { -+ entry := it.Entry() -+ require.Equal(t, i, entry.Timestamp.UnixNano()) -+ i++ -+ } -+ -+ require.Equal(t, int64(lines), i) -+ }) -+ } -+} -+ -+func TestGZIPChunkTargetSize(t *testing.T) { -+ targetSize := 1024 * 1024 -+ chk := NewMemChunkSize(EncGZIP, 1024, targetSize) -+ -+ lineSize := 512 -+ entry := &logproto.Entry{ -+ Timestamp: time.Unix(0, 0), -+ Line: """", -+ } -+ -+ // Use a random number to generate random log data, otherwise the gzip compression is way too good -+ // and the following loop has to run waaayyyyy to many times -+ // Using the same seed should guarantee the same random numbers and same test data. 
-+ r := rand.New(rand.NewSource(99)) -+ -+ i := int64(0) -+ -+ for ; chk.SpaceFor(entry) && i < 5000; i++ { -+ logLine := make([]byte, lineSize) -+ for j := range logLine { -+ logLine[j] = byte(r.Int()) -+ } -+ entry = &logproto.Entry{ -+ Timestamp: time.Unix(0, 0), -+ Line: string(logLine), -+ } -+ entry.Timestamp = time.Unix(0, i) -+ require.NoError(t, chk.Append(entry)) -+ } -+ -+ // 5000 is a limit ot make sure the test doesn't run away, we shouldn't need this many log lines to make 1MB chunk -+ require.NotEqual(t, 5000, i) -+ -+ require.NoError(t, chk.Close()) -+ -+ require.Equal(t, 0, chk.head.size) -+ -+ // Even though the seed is static above and results should be deterministic, -+ // we will allow +/- 10% variance -+ minSize := int(float64(targetSize) * 0.9) -+ maxSize := int(float64(targetSize) * 1.1) -+ require.Greater(t, chk.CompressedSize(), minSize) -+ require.Less(t, chk.CompressedSize(), maxSize) -+ -+ // Also verify our utilization is close to 1.0 -+ ut := chk.Utilization() -+ require.Greater(t, ut, 0.99) -+ require.Less(t, ut, 1.01) -+ -+} -+ -+func TestMemChunk_AppendOutOfOrder(t *testing.T) { -+ t.Parallel() -+ -+ type tester func(t *testing.T, chk *MemChunk) -+ -+ tests := map[string]tester{ -+ ""append out of order in the same block"": func(t *testing.T, chk *MemChunk) { -+ assert.NoError(t, chk.Append(logprotoEntry(5, ""test""))) -+ assert.NoError(t, chk.Append(logprotoEntry(6, ""test""))) -+ -+ assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error()) -+ }, -+ ""append out of order in a new block right after cutting the previous one"": func(t *testing.T, chk *MemChunk) { -+ assert.NoError(t, chk.Append(logprotoEntry(5, ""test""))) -+ assert.NoError(t, chk.Append(logprotoEntry(6, ""test""))) -+ assert.NoError(t, chk.cut()) -+ -+ assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error()) -+ }, -+ ""append out of order in a new block after multiple cuts"": func(t *testing.T, chk *MemChunk) { -+ assert.NoError(t, chk.Append(logprotoEntry(5, ""test""))) -+ assert.NoError(t, chk.cut()) -+ -+ assert.NoError(t, chk.Append(logprotoEntry(6, ""test""))) -+ assert.NoError(t, chk.cut()) -+ -+ assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error()) -+ }, -+ } -+ -+ for testName, tester := range tests { -+ tester := tester -+ -+ t.Run(testName, func(t *testing.T) { -+ t.Parallel() -+ -+ tester(t, NewMemChunk(EncGZIP)) -+ }) -+ } -+} -+ -+func TestChunkSize(t *testing.T) { -+ for _, enc := range testEncoding { -+ t.Run(enc.String(), func(t *testing.T) { -+ c := NewMemChunk(enc) -+ inserted := fillChunk(c) -+ b, err := c.Bytes() -+ if err != nil { -+ t.Fatal(err) -+ } -+ t.Log(""Chunk size"", humanize.Bytes(uint64(len(b)))) -+ t.Log(""characters "", inserted) -+ }) -+ -+ } -+} -+ -+var result []Chunk -+ -+func BenchmarkWrite(b *testing.B) { -+ chunks := []Chunk{} -+ -+ entry := &logproto.Entry{ -+ Timestamp: time.Unix(0, 0), -+ Line: testdata.LogString(0), -+ } -+ i := int64(0) -+ -+ for _, enc := range testEncoding { -+ b.Run(enc.String(), func(b *testing.B) { -+ for n := 0; n < b.N; n++ { -+ c := NewMemChunk(enc) -+ // adds until full so we trigger cut which serialize using gzip -+ for c.SpaceFor(entry) { -+ _ = c.Append(entry) -+ entry.Timestamp = time.Unix(0, i) -+ entry.Line = testdata.LogString(i) -+ i++ -+ } -+ chunks = append(chunks, c) -+ } -+ result = chunks -+ }) -+ } -+ -+} -+ -+func BenchmarkRead(b *testing.B) { -+ for _, enc := range testEncoding { -+ b.Run(enc.String(), func(b *testing.B) { 
-+ chunks := generateData(enc) -+ b.ResetTimer() -+ bytesRead := int64(0) -+ now := time.Now() -+ for n := 0; n < b.N; n++ { -+ for _, c := range chunks { -+ // use forward iterator for benchmark -- backward iterator does extra allocations by keeping entries in memory -+ iterator, err := c.Iterator(time.Unix(0, 0), time.Now(), logproto.FORWARD, nil) -+ if err != nil { -+ panic(err) -+ } -+ for iterator.Next() { -+ e := iterator.Entry() -+ bytesRead += int64(len(e.Line)) -+ } -+ if err := iterator.Close(); err != nil { -+ b.Fatal(err) -+ } -+ } -+ } -+ b.Log(""bytes per second "", humanize.Bytes(uint64(float64(bytesRead)/time.Since(now).Seconds()))) -+ b.Log(""n="", b.N) -+ }) -+ } -+} -+ -+func BenchmarkHeadBlockIterator(b *testing.B) { -+ -+ for _, j := range []int{100000, 50000, 15000, 10000} { -+ b.Run(fmt.Sprintf(""Size %d"", j), func(b *testing.B) { -+ -+ h := headBlock{} -+ -+ for i := 0; i < j; i++ { -+ if err := h.append(int64(i), ""this is the append string""); err != nil { -+ b.Fatal(err) -+ } -+ } -+ -+ b.ResetTimer() -+ -+ for n := 0; n < b.N; n++ { -+ iter := h.iterator(0, math.MaxInt64, nil) -+ -+ for iter.Next() { -+ _ = iter.Entry() -+ } -+ } -+ }) -+ } -+} -diff --git a/pkg/chunkenc/pool.go b/pkg/chunkenc/pool.go -index 12a9e9df61904..51461d44fcbfe 100644 ---- a/pkg/chunkenc/pool.go -+++ b/pkg/chunkenc/pool.go -@@ -2,25 +2,39 @@ package chunkenc - - import ( - ""bufio"" -+ ""bytes"" - ""io"" - ""sync"" - -+ ""github.com/golang/snappy"" - ""github.com/klauspost/compress/gzip"" -+ ""github.com/pierrec/lz4"" - ""github.com/prometheus/prometheus/pkg/pool"" - ) - --// CompressionPool is a pool of CompressionWriter and CompressionReader -+// WriterPool is a pool of io.Writer - // This is used by every chunk to avoid unnecessary allocations. --type CompressionPool interface { -- GetWriter(io.Writer) CompressionWriter -- PutWriter(CompressionWriter) -- GetReader(io.Reader) CompressionReader -- PutReader(CompressionReader) -+type WriterPool interface { -+ GetWriter(io.Writer) io.WriteCloser -+ PutWriter(io.WriteCloser) -+} -+ -+// ReaderPool similar to WriterPool but for reading chunks. -+type ReaderPool interface { -+ GetReader(io.Reader) io.Reader -+ PutReader(io.Reader) - } - - var ( - // Gzip is the gun zip compression pool -- Gzip GzipPool -+ Gzip = GzipPool{level: gzip.DefaultCompression} -+ // LZ4 is the l4z compression pool -+ LZ4 LZ4Pool -+ // Snappy is the snappy compression pool -+ Snappy SnappyPool -+ // Noop is the no compression pool -+ Noop NoopPool -+ - // BufReaderPool is bufio.Reader pool - BufReaderPool = &BufioReaderPool{ - pool: sync.Pool{ -@@ -29,54 +43,180 @@ var ( - } - // BytesBufferPool is a bytes buffer used for lines decompressed. 
- // Buckets [0.5KB,1KB,2KB,4KB,8KB] -- BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) }) -+ BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) }) -+ serializeBytesBufferPool = sync.Pool{ -+ New: func() interface{} { -+ return &bytes.Buffer{} -+ }, -+ } - ) - -+func getWriterPool(enc Encoding) WriterPool { -+ return getReaderPool(enc).(WriterPool) -+} -+ -+func getReaderPool(enc Encoding) ReaderPool { -+ switch enc { -+ case EncGZIP: -+ return &Gzip -+ case EncLZ4: -+ return &LZ4 -+ case EncSnappy: -+ return &Snappy -+ case EncNone: -+ return &Noop -+ default: -+ panic(""unknown encoding"") -+ } -+} -+ - // GzipPool is a gun zip compression pool - type GzipPool struct { - readers sync.Pool - writers sync.Pool -+ level int - } - - // GetReader gets or creates a new CompressionReader and reset it to read from src --func (pool *GzipPool) GetReader(src io.Reader) (reader CompressionReader) { -+func (pool *GzipPool) GetReader(src io.Reader) io.Reader { - if r := pool.readers.Get(); r != nil { -- reader = r.(CompressionReader) -+ reader := r.(*gzip.Reader) - err := reader.Reset(src) - if err != nil { - panic(err) - } -- } else { -- var err error -- reader, err = gzip.NewReader(src) -- if err != nil { -- panic(err) -- } -+ return reader -+ } -+ reader, err := gzip.NewReader(src) -+ if err != nil { -+ panic(err) - } - return reader - } - - // PutReader places back in the pool a CompressionReader --func (pool *GzipPool) PutReader(reader CompressionReader) { -+func (pool *GzipPool) PutReader(reader io.Reader) { - pool.readers.Put(reader) - } - - // GetWriter gets or creates a new CompressionWriter and reset it to write to dst --func (pool *GzipPool) GetWriter(dst io.Writer) (writer CompressionWriter) { -+func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser { - if w := pool.writers.Get(); w != nil { -- writer = w.(CompressionWriter) -+ writer := w.(*gzip.Writer) - writer.Reset(dst) -- } else { -- writer = gzip.NewWriter(dst) -+ return writer -+ } -+ -+ level := pool.level -+ if level == 0 { -+ level = gzip.DefaultCompression -+ } -+ w, err := gzip.NewWriterLevel(dst, level) -+ if err != nil { -+ panic(err) // never happens, error is only returned on wrong compression level. 
- } -- return writer -+ return w - } - - // PutWriter places back in the pool a CompressionWriter --func (pool *GzipPool) PutWriter(writer CompressionWriter) { -+func (pool *GzipPool) PutWriter(writer io.WriteCloser) { - pool.writers.Put(writer) - } - -+type LZ4Pool struct { -+ readers sync.Pool -+ writers sync.Pool -+} -+ -+// GetReader gets or creates a new CompressionReader and reset it to read from src -+func (pool *LZ4Pool) GetReader(src io.Reader) io.Reader { -+ if r := pool.readers.Get(); r != nil { -+ reader := r.(*lz4.Reader) -+ reader.Reset(src) -+ return reader -+ } -+ return lz4.NewReader(src) -+} -+ -+// PutReader places back in the pool a CompressionReader -+func (pool *LZ4Pool) PutReader(reader io.Reader) { -+ pool.readers.Put(reader) -+} -+ -+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst -+func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser { -+ if w := pool.writers.Get(); w != nil { -+ writer := w.(*lz4.Writer) -+ writer.Reset(dst) -+ return writer -+ } -+ return lz4.NewWriter(dst) -+} -+ -+// PutWriter places back in the pool a CompressionWriter -+func (pool *LZ4Pool) PutWriter(writer io.WriteCloser) { -+ pool.writers.Put(writer) -+} -+ -+type SnappyPool struct { -+ readers sync.Pool -+ writers sync.Pool -+} -+ -+// GetReader gets or creates a new CompressionReader and reset it to read from src -+func (pool *SnappyPool) GetReader(src io.Reader) io.Reader { -+ if r := pool.readers.Get(); r != nil { -+ reader := r.(*snappy.Reader) -+ reader.Reset(src) -+ return reader -+ } -+ return snappy.NewReader(src) -+} -+ -+// PutReader places back in the pool a CompressionReader -+func (pool *SnappyPool) PutReader(reader io.Reader) { -+ pool.readers.Put(reader) -+} -+ -+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst -+func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser { -+ if w := pool.writers.Get(); w != nil { -+ writer := w.(*snappy.Writer) -+ writer.Reset(dst) -+ return writer -+ } -+ return snappy.NewBufferedWriter(dst) -+} -+ -+// PutWriter places back in the pool a CompressionWriter -+func (pool *SnappyPool) PutWriter(writer io.WriteCloser) { -+ pool.writers.Put(writer) -+} -+ -+type NoopPool struct{} -+ -+// GetReader gets or creates a new CompressionReader and reset it to read from src -+func (pool *NoopPool) GetReader(src io.Reader) io.Reader { -+ return src -+} -+ -+// PutReader places back in the pool a CompressionReader -+func (pool *NoopPool) PutReader(reader io.Reader) {} -+ -+type noopCloser struct { -+ io.Writer -+} -+ -+func (noopCloser) Close() error { return nil } -+ -+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst -+func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser { -+ return noopCloser{dst} -+} -+ -+// PutWriter places back in the pool a CompressionWriter -+func (pool *NoopPool) PutWriter(writer io.WriteCloser) {} -+ - // BufioReaderPool is a bufio reader that uses sync.Pool. - type BufioReaderPool struct { - pool sync.Pool -diff --git a/pkg/chunkenc/testdata/testdata.go b/pkg/chunkenc/testdata/testdata.go -new file mode 100644 -index 0000000000000..265ca4d64052a ---- /dev/null -+++ b/pkg/chunkenc/testdata/testdata.go -@@ -0,0 +1,1013 @@ -+package testdata -+ -+import ""strings"" -+ -+// LogString returns a test log line. Returns the same line for the same index. 
-+func LogString(index int64) string { -+ if index > int64(len(logs)-1) { -+ index = index % int64(len(logs)) -+ } -+ return logs[index] -+} -+ -+var logs = strings.Split(`level=info ts=2019-12-12T15:00:08.325Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHNM71GRCJS7M34Q0EV7 sources=""[01DVWNC6NWY1A60AZV3Z6DGS65 01DVWW7XXX75GHA6ZDTD170CSZ 01DVX33N5W86CWJJVRPAVXJRWJ]"" duration=2.897213221s -+level=info ts=2019-12-12T15:00:08.296Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHQRVN42AF196NYJ9C4C sources=""[01DVWNC6NSPJRCSBZ4QD3SXS66 01DVWW7XY69Y4YT09HR0RSR8KY 01DVX33N5SMVPB1TMD9J1M8GGK]"" duration=2.800759388s -+level=info ts=2019-12-12T15:00:05.285Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1037 last=1039 duration=3.030078405s -+level=info ts=2019-12-12T15:00:05.225Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1037 last=1039 duration=3.019791992s -+level=info ts=2019-12-12T15:00:02.255Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=125.980176ms -+level=info ts=2019-12-12T15:00:02.206Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.111334ms -+level=info ts=2019-12-12T15:00:01.874Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576152000000 maxt=1576159200000 ulid=01DVX9ZCE8WZCTQJWSYDGHVQV8 duration=1.801853505s -+level=info ts=2019-12-12T15:00:01.854Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576152000000 maxt=1576159200000 ulid=01DVX9ZCDWEBXRYWA7585TN2RV duration=1.794588392s -+level=info ts=2019-12-12T13:00:05.461Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1034 last=1036 duration=3.044019343s -+level=info ts=2019-12-12T13:00:05.332Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1034 last=1036 duration=3.040243488s -+level=info ts=2019-12-12T13:00:02.417Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.883109ms -+level=info ts=2019-12-12T13:00:02.291Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.278558ms -+level=info ts=2019-12-12T13:00:02.048Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576144800000 maxt=1576152000000 ulid=01DVX33N5W86CWJJVRPAVXJRWJ duration=1.987867109s -+level=info ts=2019-12-12T13:00:01.914Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576144800000 maxt=1576152000000 ulid=01DVX33N5SMVPB1TMD9J1M8GGK duration=1.856432758s -+level=info ts=2019-12-12T12:58:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T12:52:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T11:00:05.320Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1031 last=1033 duration=2.999621843s -+level=info ts=2019-12-12T11:00:05.315Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1031 last=1033 duration=2.962560692s -+level=info ts=2019-12-12T11:00:02.352Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.600701ms -+level=info ts=2019-12-12T11:00:02.321Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=134.547131ms -+level=info 
ts=2019-12-12T11:00:01.975Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576137600000 maxt=1576144800000 ulid=01DVWW7XY69Y4YT09HR0RSR8KY duration=1.905948839s -+level=info ts=2019-12-12T11:00:01.889Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576137600000 maxt=1576144800000 ulid=01DVWW7XXX75GHA6ZDTD170CSZ duration=1.828298188s -+level=info ts=2019-12-12T10:55:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T10:49:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T10:33:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T10:25:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T10:21:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T10:14:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T09:00:16.465Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576130400000 ulid=01DVWNCFJRNW4RP8C56D4QNRXH sources=""[01DVVC60FYTRXZ9457XT10Y7AH 01DVW0S6A5HFTYBYD34SGAZJSR 01DVWNCC9SYJDQP0Y2RXK8XJC9]"" duration=7.289011992s -+level=info ts=2019-12-12T09:00:15.812Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576130400000 ulid=01DVWNCF9JNSMSKZHW8STXQARA sources=""[01DVVC60DBGMXD5DXR6Y5XWNXF 01DVW0S67R7JFBFTFWMNVS8YR3 01DVWNCC599NDRZWRRSZF4XGHF]"" duration=6.930550254s -+level=info ts=2019-12-12T09:00:08.717Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576108800000 maxt=1576130400000 ulid=01DVWNCC9SYJDQP0Y2RXK8XJC9 sources=""[01DVW0S0XW63CVRA3EPRSC8NWQ 01DVW7MR5W18322RVFY6WM9GR2 01DVWEGFDW0C09KSCRQ2F8DGN3]"" duration=2.900180235s -+level=info ts=2019-12-12T09:00:08.440Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576108800000 maxt=1576130400000 ulid=01DVWNCC599NDRZWRRSZF4XGHF sources=""[01DVW0S0XS1SQQQK3CQYCHN9HV 01DVW7MR5ZN3K38ZHBJ243HDZJ 01DVWEGFE0DGKKDG4V9AGAPPBQ]"" duration=2.767053211s -+level=info ts=2019-12-12T09:00:05.604Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1028 last=1030 duration=2.998418095s -+level=info ts=2019-12-12T09:00:05.470Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1028 last=1030 duration=3.008684806s -+level=info ts=2019-12-12T09:00:02.606Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.82085ms -+level=info ts=2019-12-12T09:00:02.461Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.770206ms -+level=info ts=2019-12-12T09:00:01.995Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576130400000 maxt=1576137600000 ulid=01DVWNC6NWY1A60AZV3Z6DGS65 duration=1.934602237s -+level=info ts=2019-12-12T09:00:01.960Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576130400000 maxt=1576137600000 
ulid=01DVWNC6NSPJRCSBZ4QD3SXS66 duration=1.902822647s -+level=info ts=2019-12-12T08:59:54.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T08:54:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T08:12:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T08:05:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T07:00:05.421Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1025 last=1027 duration=3.037037204s -+level=info ts=2019-12-12T07:00:05.263Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1025 last=1027 duration=2.984857831s -+level=info ts=2019-12-12T07:00:02.383Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.79721ms -+level=info ts=2019-12-12T07:00:02.278Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.228064ms -+level=info ts=2019-12-12T07:00:02.052Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576123200000 maxt=1576130400000 ulid=01DVWEGFE0DGKKDG4V9AGAPPBQ duration=1.987940522s -+level=info ts=2019-12-12T07:00:01.927Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576123200000 maxt=1576130400000 ulid=01DVWEGFDW0C09KSCRQ2F8DGN3 duration=1.866990386s -+level=info ts=2019-12-12T05:00:05.355Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1022 last=1024 duration=3.046145151s -+level=info ts=2019-12-12T05:00:05.309Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1022 last=1024 duration=3.019897535s -+level=info ts=2019-12-12T05:00:02.309Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.294946ms -+level=info ts=2019-12-12T05:00:02.289Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=145.150847ms -+level=info ts=2019-12-12T05:00:01.939Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576116000000 maxt=1576123200000 ulid=01DVW7MR5ZN3K38ZHBJ243HDZJ duration=1.875204968s -+level=info ts=2019-12-12T05:00:01.813Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576116000000 maxt=1576123200000 ulid=01DVW7MR5W18322RVFY6WM9GR2 duration=1.753345795s -+level=info ts=2019-12-12T04:38:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T04:33:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T04:00:54.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T03:56:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T03:00:08.433Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576087200000 maxt=1576108800000 
ulid=01DVW0S6A5HFTYBYD34SGAZJSR sources=""[01DVVC5V5WESMMH77FZVCJ80Q8 01DVVK1JDWNVFGWS4JPY2K4CAS 01DVVSX9NWR5V8SSJAPKQ2TCTH]"" duration=2.860812672s -+level=info ts=2019-12-12T03:00:08.279Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576087200000 maxt=1576108800000 ulid=01DVW0S67R7JFBFTFWMNVS8YR3 sources=""[01DVVC5V6145SMRFE0WR0P3YTQ 01DVVK1JE1SSYY4EKS4HAT4SK3 01DVVSX9NRE3DWK67A2J17BE0T]"" duration=2.782760638s -+level=info ts=2019-12-12T03:00:05.372Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1019 last=1021 duration=2.990754756s -+level=info ts=2019-12-12T03:00:05.289Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1019 last=1021 duration=3.007795347s -+level=info ts=2019-12-12T03:00:02.381Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.007667ms -+level=info ts=2019-12-12T03:00:02.282Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=133.138336ms -+level=info ts=2019-12-12T03:00:01.987Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576108800000 maxt=1576116000000 ulid=01DVW0S0XW63CVRA3EPRSC8NWQ duration=1.927367458s -+level=info ts=2019-12-12T03:00:01.906Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576108800000 maxt=1576116000000 ulid=01DVW0S0XS1SQQQK3CQYCHN9HV duration=1.84874308s -+level=info ts=2019-12-12T02:39:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-12T02:33:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-12T01:00:05.500Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1016 last=1018 duration=3.027246961s -+level=info ts=2019-12-12T01:00:05.265Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1016 last=1018 duration=2.989822576s -+level=info ts=2019-12-12T01:00:02.473Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=124.134851ms -+level=info ts=2019-12-12T01:00:02.275Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.268006ms -+level=info ts=2019-12-12T01:00:02.092Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576101600000 maxt=1576108800000 ulid=01DVVSX9NRE3DWK67A2J17BE0T duration=2.035218414s -+level=info ts=2019-12-12T01:00:01.907Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576101600000 maxt=1576108800000 ulid=01DVVSX9NWR5V8SSJAPKQ2TCTH duration=1.847566214s -+level=info ts=2019-12-11T23:00:05.552Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1013 last=1015 duration=3.042911717s -+level=info ts=2019-12-11T23:00:05.255Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1013 last=1015 duration=3.007686626s -+level=info ts=2019-12-11T23:00:02.509Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=135.735201ms -+level=info ts=2019-12-11T23:00:02.247Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.374582ms -+level=info ts=2019-12-11T23:00:02.154Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576094400000 maxt=1576101600000 ulid=01DVVK1JE1SSYY4EKS4HAT4SK3 duration=2.088724625s -+level=info ts=2019-12-11T23:00:01.873Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576094400000 maxt=1576101600000 
ulid=01DVVK1JDWNVFGWS4JPY2K4CAS duration=1.813033164s -+level=info ts=2019-12-11T21:00:08.427Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576087200000 ulid=01DVVC60FYTRXZ9457XT10Y7AH sources=""[01DVTQJNDXSY7N5V60ZX7X1C3J 01DVTYECNW5T3AHHB2EXATYFMJ 01DVV5A3XWVRTNS7G7BBDQ9G2W]"" duration=2.925663083s -+level=info ts=2019-12-11T21:00:08.281Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576087200000 ulid=01DVVC60DBGMXD5DXR6Y5XWNXF sources=""[01DVTQJNDRV9NDCK9H2BCH04R0 01DVTYECNS4AZH3ZMCER87DYWG 01DVV5A3XRVMTB2E7V3MZ6RGCA]"" duration=2.862756811s -+level=info ts=2019-12-11T21:00:05.288Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1010 last=1012 duration=2.998716456s -+level=info ts=2019-12-11T21:00:05.204Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1010 last=1012 duration=3.013679702s -+level=info ts=2019-12-11T21:00:02.289Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=124.171081ms -+level=info ts=2019-12-11T21:00:02.190Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=114.925741ms -+level=info ts=2019-12-11T21:00:01.942Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576087200000 maxt=1576094400000 ulid=01DVVC5V5WESMMH77FZVCJ80Q8 duration=1.881893506s -+level=info ts=2019-12-11T21:00:01.837Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576087200000 maxt=1576094400000 ulid=01DVVC5V6145SMRFE0WR0P3YTQ duration=1.772164011s -+level=info ts=2019-12-11T19:00:05.276Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1007 last=1009 duration=3.031727362s -+level=info ts=2019-12-11T19:00:05.222Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1007 last=1009 duration=3.003072336s -+level=info ts=2019-12-11T19:00:02.244Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=125.675247ms -+level=info ts=2019-12-11T19:00:02.219Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.466308ms -+level=info ts=2019-12-11T19:00:01.888Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576080000000 maxt=1576087200000 ulid=01DVV5A3XRVMTB2E7V3MZ6RGCA duration=1.832443683s -+level=info ts=2019-12-11T19:00:01.845Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576080000000 maxt=1576087200000 ulid=01DVV5A3XWVRTNS7G7BBDQ9G2W duration=1.784935995s -+level=info ts=2019-12-11T18:31:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T18:24:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T17:00:05.233Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1004 last=1006 duration=3.008189996s -+level=info ts=2019-12-11T17:00:05.223Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1004 last=1006 duration=2.97892s -+level=info ts=2019-12-11T17:00:02.244Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=132.385042ms -+level=info ts=2019-12-11T17:00:02.225Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=125.500534ms -+level=info ts=2019-12-11T17:00:01.870Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576072800000 maxt=1576080000000 
ulid=01DVTYECNW5T3AHHB2EXATYFMJ duration=1.810447322s -+level=info ts=2019-12-11T17:00:01.870Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576072800000 maxt=1576080000000 ulid=01DVTYECNS4AZH3ZMCER87DYWG duration=1.813347748s -+level=info ts=2019-12-11T15:00:16.297Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576065600000 ulid=01DVTQJY4WBY96QVV4XQJTR2JC sources=""[01DVSECF6Q4JXFDGMFQB3J1Z9E 01DVT2ZN0DMXXJJDHKS0M8JWMS 01DVTQJTX0GZ1S7J51CN1RJNQX]"" duration=7.308935842s -+level=info ts=2019-12-11T15:00:15.941Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576065600000 ulid=01DVTQJY58270MEPGVGGDZZRKJ sources=""[01DVSECF6J4NZRNHABZ2MSG7V7 01DVT2ZN4RB65KG77XPHPNVSAM 01DVTQJTYGFS18MWME9Z2NFJSW]"" duration=6.941637414s -+level=info ts=2019-12-11T15:00:08.544Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576044000000 maxt=1576065600000 ulid=01DVTQJTX0GZ1S7J51CN1RJNQX sources=""[01DVT2ZFNYB7DEH57ZX4HW2DAV 01DVT9V6XW9ENV15NHKR20T9B4 01DVTGPY5WTBSSEQ37JQ2VPCTQ]"" duration=2.880290482s -+level=info ts=2019-12-11T15:00:08.541Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576044000000 maxt=1576065600000 ulid=01DVTQJTYGFS18MWME9Z2NFJSW sources=""[01DVT2ZFP3R7RB9H6BS3JVAMXJ 01DVT9V6Y21E8YXRKNGA9RPB7D 01DVTGPY5XGARMV8B8VBWQ23W3]"" duration=2.829184147s -+level=info ts=2019-12-11T15:00:05.505Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1001 last=1003 duration=3.006477625s -+level=info ts=2019-12-11T15:00:05.452Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1001 last=1003 duration=2.990895181s -+level=info ts=2019-12-11T15:00:02.498Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=129.237566ms -+level=info ts=2019-12-11T15:00:02.461Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=129.961097ms -+level=info ts=2019-12-11T15:00:02.022Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576065600000 maxt=1576072800000 ulid=01DVTQJNDRV9NDCK9H2BCH04R0 duration=1.96598488s -+level=info ts=2019-12-11T15:00:01.933Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576065600000 maxt=1576072800000 ulid=01DVTQJNDXSY7N5V60ZX7X1C3J duration=1.871872199s -+level=info ts=2019-12-11T14:15:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T14:07:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T13:18:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T13:12:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T13:00:05.395Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=998 last=1000 duration=3.010358861s -+level=info ts=2019-12-11T13:00:05.249Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=998 last=1000 duration=3.032196282s -+level=info ts=2019-12-11T13:00:02.385Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.568186ms -+level=info 
ts=2019-12-11T13:00:02.217Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=136.017788ms -+level=info ts=2019-12-11T13:00:02.021Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576058400000 maxt=1576065600000 ulid=01DVTGPY5XGARMV8B8VBWQ23W3 duration=1.959903s -+level=info ts=2019-12-11T13:00:01.865Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576058400000 maxt=1576065600000 ulid=01DVTGPY5WTBSSEQ37JQ2VPCTQ duration=1.805149859s -+level=info ts=2019-12-11T11:46:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T11:39:44.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T11:35:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T11:26:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T11:15:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T11:06:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T11:01:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T11:00:05.591Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=995 last=997 duration=3.063684941s -+level=info ts=2019-12-11T11:00:05.297Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=995 last=997 duration=3.051047495s -+level=info ts=2019-12-11T11:00:02.527Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.530749ms -+level=info ts=2019-12-11T11:00:02.246Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=123.08975ms -+level=info ts=2019-12-11T11:00:02.096Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576051200000 maxt=1576058400000 ulid=01DVT9V6Y21E8YXRKNGA9RPB7D duration=2.029825916s -+level=info ts=2019-12-11T11:00:01.819Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576051200000 maxt=1576058400000 ulid=01DVT9V6XW9ENV15NHKR20T9B4 duration=1.7583013s -+level=info ts=2019-12-11T10:54:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T10:46:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T10:39:44.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T10:34:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T10:26:14.793Z caller=queue_manager.go:559 component=remote 
queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T10:18:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T10:12:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T09:56:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T09:48:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T09:00:08.553Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576022400000 maxt=1576044000000 ulid=01DVT2ZN4RB65KG77XPHPNVSAM sources=""[01DVSEC9XXK1J3B0186KYQECZT 01DVSN815STR0D0B8245RWNF13 01DVSW3RDRKTJVEZWGYE07XBXE]"" duration=2.896352595s -+level=info ts=2019-12-11T09:00:08.407Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576022400000 maxt=1576044000000 ulid=01DVT2ZN0DMXXJJDHKS0M8JWMS sources=""[01DVSEC9XW0W8V42SPRR0YMM0X 01DVSN815W8YTW3DPQTJVRNTS4 01DVSW3RDWE1WHSM8AEW0ARA3S]"" duration=2.890101974s -+level=info ts=2019-12-11T09:00:05.444Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=992 last=994 duration=3.058184317s -+level=info ts=2019-12-11T09:00:05.306Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=992 last=994 duration=2.99204816s -+level=info ts=2019-12-11T09:00:02.385Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.295437ms -+level=info ts=2019-12-11T09:00:02.313Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.456638ms -+level=info ts=2019-12-11T09:00:02.023Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576044000000 maxt=1576051200000 ulid=01DVT2ZFP3R7RB9H6BS3JVAMXJ duration=1.955843851s -+level=info ts=2019-12-11T09:00:01.935Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576044000000 maxt=1576051200000 ulid=01DVT2ZFNYB7DEH57ZX4HW2DAV duration=1.873653026s -+level=info ts=2019-12-11T07:00:05.441Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=989 last=991 duration=3.013763908s -+level=info ts=2019-12-11T07:00:05.272Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=989 last=991 duration=2.979497994s -+level=info ts=2019-12-11T07:00:02.427Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.635643ms -+level=info ts=2019-12-11T07:00:02.293Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=121.051415ms -+level=info ts=2019-12-11T07:00:02.056Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576036800000 maxt=1576044000000 ulid=01DVSW3RDWE1WHSM8AEW0ARA3S duration=1.995603695s -+level=info ts=2019-12-11T07:00:01.941Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576036800000 maxt=1576044000000 ulid=01DVSW3RDRKTJVEZWGYE07XBXE duration=1.885680378s -+level=info ts=2019-12-11T06:20:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T06:14:14.793Z caller=queue_manager.go:559 
component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T05:02:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=5 to=4 -+level=info ts=2019-12-11T05:01:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=5 -+level=info ts=2019-12-11T05:00:05.488Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=986 last=988 duration=3.043360624s -+level=info ts=2019-12-11T05:00:05.288Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=986 last=988 duration=2.998209654s -+level=info ts=2019-12-11T05:00:02.445Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=130.642245ms -+level=info ts=2019-12-11T05:00:02.290Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.363621ms -+level=info ts=2019-12-11T05:00:02.066Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576029600000 maxt=1576036800000 ulid=01DVSN815STR0D0B8245RWNF13 duration=2.008689142s -+level=info ts=2019-12-11T05:00:01.938Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576029600000 maxt=1576036800000 ulid=01DVSN815W8YTW3DPQTJVRNTS4 duration=1.877943808s -+level=info ts=2019-12-11T04:55:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T04:35:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T04:28:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T04:15:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T04:07:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T04:03:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T03:57:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T03:52:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T03:43:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T03:32:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T03:24:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T03:19:34.793Z caller=queue_manager.go:559 
component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T03:12:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T03:00:08.325Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576022400000 ulid=01DVSECF6Q4JXFDGMFQB3J1Z9E sources=""[01DVRSS45W7DXE05RGBYGH58PY 01DVS0MVDWGK47AZ3HY5GQEMK4 01DVS7GJNW7BF3R6KK7GW291R0]"" duration=2.861556831s -+level=info ts=2019-12-11T03:00:08.255Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576022400000 ulid=01DVSECF6J4NZRNHABZ2MSG7V7 sources=""[01DVRSS4632MEB6SYC6SB7DTGE 01DVS0MVDR5Z67QJD6T94CXHRA 01DVS7GJNSYRFT48H9CDRP82YV]"" duration=2.796902205s -+level=info ts=2019-12-11T03:00:05.253Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=983 last=985 duration=3.004398083s -+level=info ts=2019-12-11T03:00:05.245Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=983 last=985 duration=3.023743067s -+level=info ts=2019-12-11T03:00:02.248Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.893231ms -+level=info ts=2019-12-11T03:00:02.221Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=132.662929ms -+level=info ts=2019-12-11T03:00:01.903Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576022400000 maxt=1576029600000 ulid=01DVSEC9XW0W8V42SPRR0YMM0X duration=1.842688968s -+level=info ts=2019-12-11T03:00:01.847Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576022400000 maxt=1576029600000 ulid=01DVSEC9XXK1J3B0186KYQECZT duration=1.78558499s -+level=info ts=2019-12-11T02:18:44.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=5 to=4 -+level=info ts=2019-12-11T02:18:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=5 -+level=info ts=2019-12-11T02:11:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T01:59:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-11T01:52:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-11T01:00:05.272Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=980 last=982 duration=3.025045534s -+level=info ts=2019-12-11T01:00:05.189Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=980 last=982 duration=2.992866718s -+level=info ts=2019-12-11T01:00:02.247Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=123.561834ms -+level=info ts=2019-12-11T01:00:02.196Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=108.589195ms -+level=info ts=2019-12-11T01:00:01.905Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576015200000 maxt=1576022400000 ulid=01DVS7GJNW7BF3R6KK7GW291R0 duration=1.844635186s -+level=info ts=2019-12-11T01:00:01.866Z caller=compact.go:496 
component=tsdb msg=""write block"" mint=1576015200000 maxt=1576022400000 ulid=01DVS7GJNSYRFT48H9CDRP82YV duration=1.809175377s -+level=info ts=2019-12-11T00:31:25.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=7 to=5 -+level=info ts=2019-12-11T00:30:25.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=10 to=7 -+level=info ts=2019-12-11T00:29:55.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=14 to=10 -+level=info ts=2019-12-11T00:29:25.064Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=85 to=14 -+level=info ts=2019-12-11T00:29:15.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=5 to=85 -+level=info ts=2019-12-10T23:00:05.385Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=977 last=979 duration=3.157877457s -+level=info ts=2019-12-10T23:00:05.136Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=977 last=979 duration=3.013728541s -+level=info ts=2019-12-10T23:00:02.227Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=133.349607ms -+level=info ts=2019-12-10T23:00:02.123Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=110.615384ms -+level=info ts=2019-12-10T23:00:01.832Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576008000000 maxt=1576015200000 ulid=01DVS0MVDWGK47AZ3HY5GQEMK4 duration=1.772580137s -+level=info ts=2019-12-10T23:00:01.780Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576008000000 maxt=1576015200000 ulid=01DVS0MVDR5Z67QJD6T94CXHRA duration=1.724738556s -+level=info ts=2019-12-10T21:00:18.426Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575936000000 maxt=1576000800000 ulid=01DVRSSD7JM2Y5MQNCZ7QZRSWK sources=""[01DVQGJZNRDFP0P161HP7GJX44 01DVR563YKZY789FPAM3DD8DKX 01DVRSS9Q04WN7F254ZCSQ4YP5]"" duration=9.096172888s -+level=info ts=2019-12-10T21:00:16.394Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575936000000 maxt=1576000800000 ulid=01DVRSSCTH9ED87EXNSR9J8PE6 sources=""[01DVQGJY3RF9X7R93QY6V579W3 01DVR563QG1PZ1AY7RPKSCMKND 01DVRSS9HMN2EC6QQQ2XP7R90D]"" duration=7.481260173s -+level=info ts=2019-12-10T21:00:08.859Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575979200000 maxt=1576000800000 ulid=01DVRSS9Q04WN7F254ZCSQ4YP5 sources=""[01DVR55YDW6Q96ZHGXD1T7HVF4 01DVRC1NNWE3ZMEQ6035ZJTF49 01DVRJXCXW4W35MBB4E9RXX1QD]"" duration=3.130772971s -+level=info ts=2019-12-10T21:00:08.473Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575979200000 maxt=1576000800000 ulid=01DVRSS9HMN2EC6QQQ2XP7R90D sources=""[01DVR55YE3CA9B12S48FTTFSVD 01DVRC1NP4CCWPRMCC7667R1FZ 01DVRJXCY0DY2R6DVWWGNXNPRQ]"" duration=2.917254733s -+level=info ts=2019-12-10T21:00:05.490Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=974 last=976 duration=3.159908932s -+level=info ts=2019-12-10T21:00:05.339Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=974 last=976 duration=3.090229598s -+level=info ts=2019-12-10T21:00:02.330Z caller=head.go:596 component=tsdb 
msg=""head GC completed"" duration=129.067188ms -+level=info ts=2019-12-10T21:00:02.249Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=108.272575ms -+level=info ts=2019-12-10T21:00:01.804Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576000800000 maxt=1576008000000 ulid=01DVRSS45W7DXE05RGBYGH58PY duration=1.743999568s -+level=info ts=2019-12-10T21:00:01.774Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576000800000 maxt=1576008000000 ulid=01DVRSS4632MEB6SYC6SB7DTGE duration=1.706876662s -+level=info ts=2019-12-10T20:35:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4 -+level=info ts=2019-12-10T20:29:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-10T19:00:06.012Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=971 last=973 duration=3.248527735s -+level=info ts=2019-12-10T19:00:05.641Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=971 last=973 duration=3.1047498s -+level=info ts=2019-12-10T19:00:02.763Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=210.397069ms -+level=info ts=2019-12-10T19:00:02.536Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=171.333573ms -+level=info ts=2019-12-10T19:00:02.259Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575993600000 maxt=1576000800000 ulid=01DVRJXCXW4W35MBB4E9RXX1QD duration=2.199162459s -+level=info ts=2019-12-10T19:00:02.080Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575993600000 maxt=1576000800000 ulid=01DVRJXCY0DY2R6DVWWGNXNPRQ duration=2.016321337s -+level=info ts=2019-12-10T17:00:05.549Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=968 last=970 duration=3.183706512s -+level=info ts=2019-12-10T17:00:05.319Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=968 last=970 duration=3.088304654s -+level=info ts=2019-12-10T17:00:02.365Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=133.008474ms -+level=info ts=2019-12-10T17:00:02.231Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=114.89207ms -+level=info ts=2019-12-10T17:00:01.942Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575986400000 maxt=1575993600000 ulid=01DVRC1NNWE3ZMEQ6035ZJTF49 duration=1.881731957s -+level=info ts=2019-12-10T17:00:01.864Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575986400000 maxt=1575993600000 ulid=01DVRC1NP4CCWPRMCC7667R1FZ duration=1.795832733s -+level=info ts=2019-12-10T15:00:09.507Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575957600000 maxt=1575979200000 ulid=01DVR563YKZY789FPAM3DD8DKX sources=""[01DVQGJRNW1DY7K8KW6B2RY4FF 01DVQQEFXWYP18MDFKY58VJCSG 01DVQYA75W7ERXK6FBEMYYEX6S]"" duration=3.791514409s -+level=info ts=2019-12-10T15:00:08.520Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575957600000 maxt=1575979200000 ulid=01DVR563QG1PZ1AY7RPKSCMKND sources=""[01DVQGJRP3CC86KKVR5MZ1YYTK 01DVQQEFY0CEBFC4QE02GW9S4F 01DVQYA75TMRETNZRWPV46G5Y0]"" duration=3.032106451s -+level=info ts=2019-12-10T15:00:05.484Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=965 last=967 duration=3.124836463s -+level=info ts=2019-12-10T15:00:05.277Z 
caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=965 last=967 duration=3.055778688s -+level=info ts=2019-12-10T15:00:02.359Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=130.554076ms -+level=info ts=2019-12-10T15:00:02.221Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=114.665423ms -+level=info ts=2019-12-10T15:00:01.972Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575979200000 maxt=1575986400000 ulid=01DVR55YDW6Q96ZHGXD1T7HVF4 duration=1.912209972s -+level=info ts=2019-12-10T15:00:01.878Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575979200000 maxt=1575986400000 ulid=01DVR55YE3CA9B12S48FTTFSVD duration=1.811316924s -+2019-12-10 13:50:13.598594155 +0000 UTC -+level=info ts=2019-12-10T13:50:13.596Z caller=main.go:771 msg=""Completed loading of configuration file"" filename=/etc/prometheus/prometheus.yml -+level=info ts=2019-12-10T13:50:13.553Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:50:13.552Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:50:13.551Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:50:13.551Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:50:13.546Z caller=main.go:743 msg=""Loading configuration file"" filename=/etc/prometheus/prometheus.yml -+curl -X POST --fail -o - -sS http://localhost:80/prometheus/-/reload -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..2019_12_05_07_22_08.390693530"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data/prometheus.yml"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data/recording.rules"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data/alerts.rules"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: Watching /etc/prometheus/..data -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data"": CREATE at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data_tmp"": RENAME at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..2019_12_10_13_50_13.738754268"": CHMOD at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019/12/10 13:50:13 DEBUG: Watching /etc/prometheus/..2019_12_10_13_50_13.738754268 -+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..2019_12_10_13_50_13.738754268"": CREATE at 2019-12-10 13:50:13.341280871 +0000 UTC -+2019-12-10 13:49:53.296090488 +0000 UTC -+level=info ts=2019-12-10T13:49:53.294Z caller=main.go:771 msg=""Completed loading of configuration file"" filename=/etc/prometheus/prometheus.yml -+level=info ts=2019-12-10T13:49:53.254Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:49:53.253Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s 
msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:49:53.252Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:49:53.251Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config"" -+level=info ts=2019-12-10T13:49:53.248Z caller=main.go:743 msg=""Loading configuration file"" filename=/etc/prometheus/prometheus.yml -+curl -X POST --fail -o - -sS http://localhost:80/prometheus/-/reload -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..2019_12_05_07_22_36.096562123"": REMOVE at 2019-12-10 13:49:53.044039978 +0000 UTC -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data/prometheus.yml"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data/recording.rules"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data/alerts.rules"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC -+2019/12/10 13:49:53 DEBUG: Watching /etc/prometheus/..data -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data"": CREATE at 2019-12-10 13:49:53.043039894 +0000 UTC -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data_tmp"": RENAME at 2019-12-10 13:49:53.044039978 +0000 UTC -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..2019_12_10_13_49_53.355073198"": CHMOD at 2019-12-10 13:49:53.043039894 +0000 UTC -+2019/12/10 13:49:53 DEBUG: Watching /etc/prometheus/..2019_12_10_13_49_53.355073198 -+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..2019_12_10_13_49_53.355073198"": CREATE at 2019-12-10 13:49:53.043039894 +0000 UTC -+level=info ts=2019-12-10T13:00:06.007Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=962 last=964 duration=3.145696569s -+level=info ts=2019-12-10T13:00:05.601Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=962 last=964 duration=3.062580976s -+level=info ts=2019-12-10T13:00:02.861Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=169.077152ms -+level=info ts=2019-12-10T13:00:02.539Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=152.173262ms -+level=info ts=2019-12-10T13:00:02.425Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575972000000 maxt=1575979200000 ulid=01DVQYA75W7ERXK6FBEMYYEX6S duration=2.364066751s -+level=info ts=2019-12-10T13:00:02.150Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575972000000 maxt=1575979200000 ulid=01DVQYA75TMRETNZRWPV46G5Y0 duration=2.092629264s -+level=info ts=2019-12-10T11:48:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=2 to=1 -+level=info ts=2019-12-10T11:47:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=3 to=2 -+level=info ts=2019-12-10T11:47:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=5 to=3 -+level=info ts=2019-12-10T11:46:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote 
storage resharding"" from=7 to=5 -+level=info ts=2019-12-10T11:46:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=10 to=7 -+level=info ts=2019-12-10T11:46:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=16 to=10 -+level=info ts=2019-12-10T11:45:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=26 to=16 -+level=info ts=2019-12-10T11:45:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=39 to=26 -+level=info ts=2019-12-10T11:45:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=63 to=39 -+level=info ts=2019-12-10T11:44:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=100 to=63 -+level=info ts=2019-12-10T11:44:45.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=2 to=1 -+level=info ts=2019-12-10T11:44:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=156 to=100 -+level=info ts=2019-12-10T11:44:15.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=3 to=2 -+level=info ts=2019-12-10T11:44:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=253 to=156 -+level=info ts=2019-12-10T11:43:55.064Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=4 to=3 -+level=info ts=2019-12-10T11:43:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=405 to=253 -+level=info ts=2019-12-10T11:43:45.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=5 to=4 -+level=info ts=2019-12-10T11:43:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=619 to=405 -+level=info ts=2019-12-10T11:43:25.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=8 to=5 -+level=info ts=2019-12-10T11:43:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=1000 to=619 -+level=info ts=2019-12-10T11:43:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=11 to=8 
-+level=info ts=2019-12-10T11:42:45.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=22 to=11 -+level=info ts=2019-12-10T11:42:25.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=16 to=22 -+level=error ts=2019-12-10T11:42:11.074Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 400 Bad Request: out of order sample"" -+level=error ts=2019-12-10T11:42:11.073Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 400 Bad Request: out of order sample"" -+level=info ts=2019-12-10T11:42:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=11 to=16 -+level=info ts=2019-12-10T11:41:55.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=8 to=11 -+level=info ts=2019-12-10T11:39:35.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=6 to=8 -+level=info ts=2019-12-10T11:38:55.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=4 to=6 -+level=info ts=2019-12-10T11:38:35.064Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=2 to=4 -+level=error ts=2019-12-10T11:38:12.281Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples"" -+level=error ts=2019-12-10T11:38:12.281Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=69 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples"" -+level=error ts=2019-12-10T11:38:12.235Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.235Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.204Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 97 samples"" -+level=error ts=2019-12-10T11:38:12.183Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=89 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples"" -+level=error ts=2019-12-10T11:38:12.129Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=91 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=56 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 56 samples"" -+level=error ts=2019-12-10T11:38:12.125Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=52 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) 
exceeded while adding 52 samples"" -+level=error ts=2019-12-10T11:38:12.124Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.123Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples"" -+level=error ts=2019-12-10T11:38:12.122Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=69 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples"" -+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=67 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples"" -+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=78 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 78 samples"" -+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.119Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.119Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.081Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples"" -+level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded 
while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=89 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples"" -+level=error ts=2019-12-10T11:38:11.920Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.917Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.916Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.913Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples"" -+level=error ts=2019-12-10T11:38:11.913Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples"" -+level=error ts=2019-12-10T11:38:11.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while 
adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.721Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.176Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=99 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples"" -+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples"" -+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples"" -+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples"" -+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 
samples"" -+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=46 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 46 samples"" -+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples"" -+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=92 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples"" -+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples"" -+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.955Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.953Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" 
-+level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples"" -+level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.941Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=58 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples"" -+level=error ts=2019-12-10T11:38:10.941Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.940Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.940Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=23 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 23 samples"" -+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=59 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples"" -+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples"" 
-+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples"" -+level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples"" -+level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples"" -+level=error ts=2019-12-10T11:38:10.039Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.902Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples"" -+level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" 
-+level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=92 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples"" -+level=error ts=2019-12-10T11:38:09.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples"" -+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=73 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples"" -+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=68 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 68 samples"" -+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=35 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 35 samples"" -+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples"" -+level=error 
ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples"" -+level=error ts=2019-12-10T11:38:09.876Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples"" -+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=92 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples"" -+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples"" -+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.832Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples"" -+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=17 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 17 samples"" -+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=94 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples"" -+level=error ts=2019-12-10T11:38:09.806Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples"" -+level=error ts=2019-12-10T11:38:09.736Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.736Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=48 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 48 samples"" -+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples"" -+level=error 
ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=67 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples"" -+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=43 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 43 samples"" -+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples"" -+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=19 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 19 samples"" -+level=error ts=2019-12-10T11:38:09.733Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples"" -+level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=50 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 50 samples"" -+level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples"" -+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples"" -+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples"" -+level=error
ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples"" -+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=65 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples"" -+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.715Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.646Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples"" -+level=error ts=2019-12-10T11:38:06.646Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=37 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples"" -+level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples"" -+level=error 
ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples"" -+level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.639Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.638Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=93 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 93 samples"" -+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples"" -+level=error 
ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=56 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 56 samples"" -+level=error ts=2019-12-10T11:38:06.590Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.590Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples"" -+level=error ts=2019-12-10T11:38:06.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.588Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples"" -+level=error ts=2019-12-10T11:38:06.588Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples"" -+level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.562Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.562Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.561Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.560Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.551Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.550Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples"" -+level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.540Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.538Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.538Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.505Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=69 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples"" -+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples"" -+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples"" -+level=error ts=2019-12-10T11:38:06.489Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples"" -+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples"" -+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples"" -+level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples"" -+level=error ts=2019-12-10T11:38:06.459Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples"" -+level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples"" -+level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples"" -+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples"" -+level=error ts=2019-12-10T11:38:06.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples"" -+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=66 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples"" -+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples"" -+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=33 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 33 samples"" -+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples"" -+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=71 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples"" -+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=62 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 62 samples"" -+level=error 
ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples"" -+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=58 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples"" -+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples"" -+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples"" -+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples"" -+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples"" -+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples"" -+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.353Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=60 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples"" -+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=22 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 22 samples"" -+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples"" -+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples"" -+level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.340Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=74 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples"" -+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.337Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.330Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples"" -+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples"" -+level=error ts=2019-12-10T11:38:06.327Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples"" -+level=error ts=2019-12-10T11:38:06.244Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.231Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.230Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=94 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples"" -+level=error ts=2019-12-10T11:38:06.206Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples"" -+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.198Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples"" -+level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.088Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:06.087Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error 
ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples"" -+level=error ts=2019-12-10T11:38:05.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=info ts=2019-12-10T11:38:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=1 to=2 -+level=error ts=2019-12-10T11:38:04.977Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.976Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples"" -+level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples"" -+level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples"" -+level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.717Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=87 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 87 samples"" -+level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=99 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples"" -+level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=66 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples"" -+level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=64 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 64 samples"" -+level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples"" -+level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.627Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.621Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.619Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.135Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.017Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.016Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples"" -+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.010Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.008Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples"" -+level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.004Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples"" -+level=error ts=2019-12-10T11:38:04.003Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:04.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples"" -+level=error ts=2019-12-10T11:38:03.922Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.907Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples"" -+level=error ts=2019-12-10T11:38:03.907Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.906Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples"" -+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=71 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples"" -+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.902Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples"" -+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples"" -+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples"" -+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples"" -+level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=76 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 76 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples"" -+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=78 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 78 samples"" -+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"" -+`, ""\n"") -diff --git a/pkg/chunkenc/util_test.go b/pkg/chunkenc/util_test.go -new file mode 100644 -index 0000000000000..f906bd93442b4 ---- /dev/null -+++ b/pkg/chunkenc/util_test.go -@@ -0,0 +1,53 @@ -+package chunkenc -+ -+import ( -+ ""time"" -+ -+ ""github.com/grafana/loki/pkg/chunkenc/testdata"" -+ ""github.com/grafana/loki/pkg/logproto"" -+) -+ -+func logprotoEntry(ts int64, line string) *logproto.Entry { -+ return &logproto.Entry{ -+ Timestamp: time.Unix(0, ts), -+ Line: line, -+ } -+} -+ -+func generateData(enc Encoding) []Chunk { -+ chunks := []Chunk{} -+ i := int64(0) -+ for n := 0; n < 50; n++ { -+ entry := logprotoEntry(0, testdata.LogString(0)) -+ c := NewMemChunk(enc) -+ for c.SpaceFor(entry) { -+ _ = c.Append(entry) -+ i++ -+ entry = logprotoEntry(i, testdata.LogString(i)) -+ } -+ c.Close() -+ chunks = append(chunks, c) -+ } -+ return chunks -+} -+ -+func fillChunk(c Chunk) int64 { -+ i := int64(0) -+ inserted := int64(0) -+ entry := &logproto.Entry{ -+ Timestamp: time.Unix(0, 0), -+ Line: testdata.LogString(i), -+ } -+ for c.SpaceFor(entry) { -+ err := c.Append(entry) -+ if err != nil { -+ panic(err) -+ } -+ i++ -+ inserted += int64(len(entry.Line)) -+ entry.Timestamp = time.Unix(0, i) -+ entry.Line = testdata.LogString(i) -+ -+ } -+ return inserted -+} -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index 
ccf2aee0f25ac..53bc7a0f2bca3 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -4,6 +4,7 @@ import ( - ""context"" - ""errors"" - ""flag"" -+ ""fmt"" - ""net/http"" - ""sync"" - ""time"" -@@ -18,6 +19,7 @@ import ( - ""github.com/cortexproject/cortex/pkg/ring"" - ""github.com/cortexproject/cortex/pkg/util"" - -+ ""github.com/grafana/loki/pkg/chunkenc"" - ""github.com/grafana/loki/pkg/ingester/client"" - ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/util/validation"" -@@ -48,6 +50,7 @@ type Config struct { - MaxChunkIdle time.Duration `yaml:""chunk_idle_period""` - BlockSize int `yaml:""chunk_block_size""` - TargetChunkSize int `yaml:""chunk_target_size""` -+ ChunkEncoding string `yaml:""chunk_encoding""` - - // For testing, you can override the address and ID of this ingester. - ingesterClientFactory func(cfg client.Config, addr string) (grpc_health_v1.HealthClient, error) -@@ -65,6 +68,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.DurationVar(&cfg.MaxChunkIdle, ""ingester.chunks-idle-period"", 30*time.Minute, """") - f.IntVar(&cfg.BlockSize, ""ingester.chunks-block-size"", 256*1024, """") - f.IntVar(&cfg.TargetChunkSize, ""ingester.chunk-target-size"", 0, """") -+ f.StringVar(&cfg.ChunkEncoding, ""ingester.chunk-encoding"", chunkenc.EncGZIP.String(), fmt.Sprintf(""The algorithm to use for compressing chunk. (%s)"", chunkenc.SupportedEncoding())) - } - - // Ingester builds chunks for incoming log streams. -@@ -89,7 +93,8 @@ type Ingester struct { - flushQueues []*util.PriorityQueue - flushQueuesDone sync.WaitGroup - -- limits *validation.Overrides -+ limits *validation.Overrides -+ factory func() chunkenc.Chunk - } - - // ChunkStore is the interface we need to store chunks. -@@ -102,6 +107,10 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid - if cfg.ingesterClientFactory == nil { - cfg.ingesterClientFactory = client.New - } -+ enc, err := chunkenc.ParseEncoding(cfg.ChunkEncoding) -+ if err != nil { -+ return nil, err -+ } - - i := &Ingester{ - cfg: cfg, -@@ -112,6 +121,9 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid - flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), - quitting: make(chan struct{}), - limits: limits, -+ factory: func() chunkenc.Chunk { -+ return chunkenc.NewMemChunkSize(enc, cfg.BlockSize, cfg.TargetChunkSize) -+ }, - } - - i.flushQueuesDone.Add(cfg.ConcurrentFlushes) -@@ -120,7 +132,6 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid - go i.flushLoop(j) - } - -- var err error - i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, ""ingester"") - if err != nil { - return nil, err -@@ -191,7 +202,7 @@ func (i *Ingester) getOrCreateInstance(instanceID string) *instance { - defer i.instancesMtx.Unlock() - inst, ok = i.instances[instanceID] - if !ok { -- inst = newInstance(instanceID, i.cfg.BlockSize, i.cfg.TargetChunkSize, i.limits) -+ inst = newInstance(instanceID, i.factory, i.limits) - i.instances[instanceID] = inst - } - return inst -diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -index f7a295e515189..a9d4d6e78dbae 100644 ---- a/pkg/ingester/instance.go -+++ b/pkg/ingester/instance.go -@@ -16,6 +16,7 @@ import ( - ""github.com/cortexproject/cortex/pkg/ingester/index"" - cutil ""github.com/cortexproject/cortex/pkg/util"" - -+ ""github.com/grafana/loki/pkg/chunkenc"" - ""github.com/grafana/loki/pkg/helpers"" - ""github.com/grafana/loki/pkg/iter"" - 
""github.com/grafana/loki/pkg/logproto"" -@@ -60,15 +61,14 @@ type instance struct { - streamsCreatedTotal prometheus.Counter - streamsRemovedTotal prometheus.Counter - -- blockSize int -- targetChunkSize int // Compressed bytes -- tailers map[uint32]*tailer -- tailerMtx sync.RWMutex -+ tailers map[uint32]*tailer -+ tailerMtx sync.RWMutex - -- limits *validation.Overrides -+ limits *validation.Overrides -+ factory func() chunkenc.Chunk - } - --func newInstance(instanceID string, blockSize, targetChunkSize int, limits *validation.Overrides) *instance { -+func newInstance(instanceID string, factory func() chunkenc.Chunk, limits *validation.Overrides) *instance { - i := &instance{ - streams: map[model.Fingerprint]*stream{}, - index: index.New(), -@@ -77,10 +77,9 @@ func newInstance(instanceID string, blockSize, targetChunkSize int, limits *vali - streamsCreatedTotal: streamsCreatedTotal.WithLabelValues(instanceID), - streamsRemovedTotal: streamsRemovedTotal.WithLabelValues(instanceID), - -- blockSize: blockSize, -- targetChunkSize: targetChunkSize, -- tailers: map[uint32]*tailer{}, -- limits: limits, -+ factory: factory, -+ tailers: map[uint32]*tailer{}, -+ limits: limits, - } - i.mapper = newFPMapper(i.getLabelsFromFingerprint) - return i -@@ -98,7 +97,7 @@ func (i *instance) consumeChunk(ctx context.Context, labels []client.LabelAdapte - stream, ok := i.streams[fp] - if !ok { - sortedLabels := i.index.Add(labels, fp) -- stream = newStream(fp, sortedLabels, i.blockSize, i.targetChunkSize) -+ stream = newStream(fp, sortedLabels, i.factory) - i.streams[fp] = stream - i.streamsCreatedTotal.Inc() - memoryStreams.Inc() -@@ -156,7 +155,7 @@ func (i *instance) getOrCreateStream(labels []client.LabelAdapter) (*stream, err - return nil, httpgrpc.Errorf(http.StatusTooManyRequests, ""per-user streams limit (%d) exceeded"", i.limits.MaxStreamsPerUser(i.instanceID)) - } - sortedLabels := i.index.Add(labels, fp) -- stream = newStream(fp, sortedLabels, i.blockSize, i.targetChunkSize) -+ stream = newStream(fp, sortedLabels, i.factory) - i.streams[fp] = stream - memoryStreams.Inc() - i.streamsCreatedTotal.Inc() -diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go -index c25b841efe6c5..39daee96374f2 100644 ---- a/pkg/ingester/instance_test.go -+++ b/pkg/ingester/instance_test.go -@@ -10,6 +10,7 @@ import ( - - ""github.com/prometheus/prometheus/pkg/labels"" - -+ ""github.com/grafana/loki/pkg/chunkenc"" - ""github.com/grafana/loki/pkg/logproto"" - - ""github.com/stretchr/testify/require"" -@@ -17,11 +18,15 @@ import ( - ""github.com/grafana/loki/pkg/util/validation"" - ) - -+var defaultFactory = func() chunkenc.Chunk { -+ return chunkenc.NewMemChunkSize(chunkenc.EncGZIP, 512, 0) -+} -+ - func TestLabelsCollisions(t *testing.T) { - o, err := validation.NewOverrides(validation.Limits{MaxStreamsPerUser: 1000}) - require.NoError(t, err) - -- i := newInstance(""test"", 512, 0, o) -+ i := newInstance(""test"", defaultFactory, o) - - // avoid entries from the future. 
- tt := time.Now().Add(-5 * time.Minute) -@@ -47,7 +52,7 @@ func TestConcurrentPushes(t *testing.T) { - o, err := validation.NewOverrides(validation.Limits{MaxStreamsPerUser: 1000}) - require.NoError(t, err) - -- inst := newInstance(""test"", 512, 0, o) -+ inst := newInstance(""test"", defaultFactory, o) - - const ( - concurrent = 10 -diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go -index b881b86a92aaa..706a141ff1b6e 100644 ---- a/pkg/ingester/stream.go -+++ b/pkg/ingester/stream.go -@@ -54,11 +54,10 @@ func init() { - type stream struct { - // Newest chunk at chunks[n-1]. - // Not thread-safe; assume accesses to this are locked by caller. -- chunks []chunkDesc -- fp model.Fingerprint // possibly remapped fingerprint, used in the streams map -- labels labels.Labels -- blockSize int -- targetChunkSize int // Compressed bytes -+ chunks []chunkDesc -+ fp model.Fingerprint // possibly remapped fingerprint, used in the streams map -+ labels labels.Labels -+ factory func() chunkenc.Chunk - - tailers map[uint32]*tailer - tailerMtx sync.RWMutex -@@ -77,13 +76,12 @@ type entryWithError struct { - e error - } - --func newStream(fp model.Fingerprint, labels labels.Labels, blockSize, targetChunkSize int) *stream { -+func newStream(fp model.Fingerprint, labels labels.Labels, factory func() chunkenc.Chunk) *stream { - return &stream{ -- fp: fp, -- labels: labels, -- blockSize: blockSize, -- targetChunkSize: targetChunkSize, -- tailers: map[uint32]*tailer{}, -+ fp: fp, -+ labels: labels, -+ factory: factory, -+ tailers: map[uint32]*tailer{}, - } - } - -@@ -105,7 +103,7 @@ func (s *stream) consumeChunk(_ context.Context, chunk *logproto.Chunk) error { - func (s *stream) Push(_ context.Context, entries []logproto.Entry) error { - if len(s.chunks) == 0 { - s.chunks = append(s.chunks, chunkDesc{ -- chunk: chunkenc.NewMemChunkSize(chunkenc.EncGZIP, s.blockSize, s.targetChunkSize), -+ chunk: s.factory(), - }) - chunksCreatedTotal.Inc() - } -@@ -132,7 +130,7 @@ func (s *stream) Push(_ context.Context, entries []logproto.Entry) error { - chunksCreatedTotal.Inc() - - s.chunks = append(s.chunks, chunkDesc{ -- chunk: chunkenc.NewMemChunkSize(chunkenc.EncGZIP, s.blockSize, s.targetChunkSize), -+ chunk: s.factory(), - }) - chunk = &s.chunks[len(s.chunks)-1] - } -diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE -index 74487567632c8..1eb75ef68e448 100644 ---- a/vendor/github.com/klauspost/compress/LICENSE -+++ b/vendor/github.com/klauspost/compress/LICENSE -@@ -1,4 +1,5 @@ - Copyright (c) 2012 The Go Authors. All rights reserved. -+Copyright (c) 2019 Klaus Post. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are -diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go -deleted file mode 100644 -index 8298d309aefaa..0000000000000 ---- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go -+++ /dev/null -@@ -1,42 +0,0 @@ --//+build !noasm --//+build !appengine --//+build !gccgo -- --// Copyright 2015, Klaus Post, see LICENSE for details. -- --package flate -- --import ( -- ""github.com/klauspost/cpuid"" --) -- --// crc32sse returns a hash for the first 4 bytes of the slice --// len(a) must be >= 4. --//go:noescape --func crc32sse(a []byte) uint32 -- --// crc32sseAll calculates hashes for each 4-byte set in a. --// dst must be east len(a) - 4 in size. 
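The hunks above replace the per-stream blockSize/targetChunkSize fields with a single func() chunkenc.Chunk factory, parsed once from the new ingester.chunk-encoding flag and handed down ingester -> instance -> stream. A minimal, self-contained sketch of that pattern follows; Chunk, memChunk and parseEncoding are illustrative stand-ins, not Loki's real chunkenc types or signatures.

package main

import "fmt"

// Chunk, memChunk and parseEncoding are stand-ins for chunkenc.Chunk,
// chunkenc.NewMemChunkSize and chunkenc.ParseEncoding (illustration only).
type Chunk interface{ Encoding() string }

type memChunk struct {
	enc                   string
	blockSize, targetSize int
}

func (m *memChunk) Encoding() string { return m.enc }

func parseEncoding(s string) (string, error) {
	switch s {
	case "none", "gzip":
		return s, nil
	}
	return "", fmt.Errorf("unsupported chunk encoding: %q", s)
}

func main() {
	// Parse the configured encoding once, as the ingester now does for
	// -ingester.chunk-encoding, and capture everything in a factory closure.
	enc, err := parseEncoding("gzip")
	if err != nil {
		panic(err)
	}
	newChunk := func() Chunk {
		return &memChunk{enc: enc, blockSize: 256 * 1024, targetSize: 0}
	}

	// Streams receive only the factory, not the individual knobs.
	c := newChunk()
	fmt.Println(c.Encoding()) // gzip
}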
--// The size is not checked by the assembly. --//go:noescape --func crc32sseAll(a []byte, dst []uint32) -- --// matchLenSSE4 returns the number of matching bytes in a and b --// up to length 'max'. Both slices must be at least 'max' --// bytes in size. --// --// TODO: drop the ""SSE4"" name, since it doesn't use any SSE instructions. --// --//go:noescape --func matchLenSSE4(a, b []byte, max int) int -- --// histogram accumulates a histogram of b in h. --// h must be at least 256 entries in length, --// and must be cleared before calling this function. --//go:noescape --func histogram(b []byte, h []int32) -- --// Detect SSE 4.2 feature. --func init() { -- useSSE42 = cpuid.CPU.SSE42() --} -diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s -deleted file mode 100644 -index a7994372702b7..0000000000000 ---- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s -+++ /dev/null -@@ -1,214 +0,0 @@ --//+build !noasm --//+build !appengine --//+build !gccgo -- --// Copyright 2015, Klaus Post, see LICENSE for details. -- --// func crc32sse(a []byte) uint32 --TEXT ·crc32sse(SB), 4, $0 -- MOVQ a+0(FP), R10 -- XORQ BX, BX -- -- // CRC32 dword (R10), EBX -- BYTE $0xF2; BYTE $0x41; BYTE $0x0f -- BYTE $0x38; BYTE $0xf1; BYTE $0x1a -- -- MOVL BX, ret+24(FP) -- RET -- --// func crc32sseAll(a []byte, dst []uint32) --TEXT ·crc32sseAll(SB), 4, $0 -- MOVQ a+0(FP), R8 // R8: src -- MOVQ a_len+8(FP), R10 // input length -- MOVQ dst+24(FP), R9 // R9: dst -- SUBQ $4, R10 -- JS end -- JZ one_crc -- MOVQ R10, R13 -- SHRQ $2, R10 // len/4 -- ANDQ $3, R13 // len&3 -- XORQ BX, BX -- ADDQ $1, R13 -- TESTQ R10, R10 -- JZ rem_loop -- --crc_loop: -- MOVQ (R8), R11 -- XORQ BX, BX -- XORQ DX, DX -- XORQ DI, DI -- MOVQ R11, R12 -- SHRQ $8, R11 -- MOVQ R12, AX -- MOVQ R11, CX -- SHRQ $16, R12 -- SHRQ $16, R11 -- MOVQ R12, SI -- -- // CRC32 EAX, EBX -- BYTE $0xF2; BYTE $0x0f -- BYTE $0x38; BYTE $0xf1; BYTE $0xd8 -- -- // CRC32 ECX, EDX -- BYTE $0xF2; BYTE $0x0f -- BYTE $0x38; BYTE $0xf1; BYTE $0xd1 -- -- // CRC32 ESI, EDI -- BYTE $0xF2; BYTE $0x0f -- BYTE $0x38; BYTE $0xf1; BYTE $0xfe -- MOVL BX, (R9) -- MOVL DX, 4(R9) -- MOVL DI, 8(R9) -- -- XORQ BX, BX -- MOVL R11, AX -- -- // CRC32 EAX, EBX -- BYTE $0xF2; BYTE $0x0f -- BYTE $0x38; BYTE $0xf1; BYTE $0xd8 -- MOVL BX, 12(R9) -- -- ADDQ $16, R9 -- ADDQ $4, R8 -- XORQ BX, BX -- SUBQ $1, R10 -- JNZ crc_loop -- --rem_loop: -- MOVL (R8), AX -- -- // CRC32 EAX, EBX -- BYTE $0xF2; BYTE $0x0f -- BYTE $0x38; BYTE $0xf1; BYTE $0xd8 -- -- MOVL BX, (R9) -- ADDQ $4, R9 -- ADDQ $1, R8 -- XORQ BX, BX -- SUBQ $1, R13 -- JNZ rem_loop -- --end: -- RET -- --one_crc: -- MOVQ $1, R13 -- XORQ BX, BX -- JMP rem_loop -- --// func matchLenSSE4(a, b []byte, max int) int --TEXT ·matchLenSSE4(SB), 4, $0 -- MOVQ a_base+0(FP), SI -- MOVQ b_base+24(FP), DI -- MOVQ DI, DX -- MOVQ max+48(FP), CX -- --cmp8: -- // As long as we are 8 or more bytes before the end of max, we can load and -- // compare 8 bytes at a time. If those 8 bytes are equal, repeat. -- CMPQ CX, $8 -- JLT cmp1 -- MOVQ (SI), AX -- MOVQ (DI), BX -- CMPQ AX, BX -- JNE bsf -- ADDQ $8, SI -- ADDQ $8, DI -- SUBQ $8, CX -- JMP cmp8 -- --bsf: -- // If those 8 bytes were not equal, XOR the two 8 byte values, and return -- // the index of the first byte that differs. The BSF instruction finds the -- // least significant 1 bit, the amd64 architecture is little-endian, and -- // the shift by 3 converts a bit index to a byte index. 
-- XORQ AX, BX -- BSFQ BX, BX -- SHRQ $3, BX -- ADDQ BX, DI -- -- // Subtract off &b[0] to convert from &b[ret] to ret, and return. -- SUBQ DX, DI -- MOVQ DI, ret+56(FP) -- RET -- --cmp1: -- // In the slices' tail, compare 1 byte at a time. -- CMPQ CX, $0 -- JEQ matchLenEnd -- MOVB (SI), AX -- MOVB (DI), BX -- CMPB AX, BX -- JNE matchLenEnd -- ADDQ $1, SI -- ADDQ $1, DI -- SUBQ $1, CX -- JMP cmp1 -- --matchLenEnd: -- // Subtract off &b[0] to convert from &b[ret] to ret, and return. -- SUBQ DX, DI -- MOVQ DI, ret+56(FP) -- RET -- --// func histogram(b []byte, h []int32) --TEXT ·histogram(SB), 4, $0 -- MOVQ b+0(FP), SI // SI: &b -- MOVQ b_len+8(FP), R9 // R9: len(b) -- MOVQ h+24(FP), DI // DI: Histogram -- MOVQ R9, R8 -- SHRQ $3, R8 -- JZ hist1 -- XORQ R11, R11 -- --loop_hist8: -- MOVQ (SI), R10 -- -- MOVB R10, R11 -- INCL (DI)(R11*4) -- SHRQ $8, R10 -- -- MOVB R10, R11 -- INCL (DI)(R11*4) -- SHRQ $8, R10 -- -- MOVB R10, R11 -- INCL (DI)(R11*4) -- SHRQ $8, R10 -- -- MOVB R10, R11 -- INCL (DI)(R11*4) -- SHRQ $8, R10 -- -- MOVB R10, R11 -- INCL (DI)(R11*4) -- SHRQ $8, R10 -- -- MOVB R10, R11 -- INCL (DI)(R11*4) -- SHRQ $8, R10 -- -- MOVB R10, R11 -- INCL (DI)(R11*4) -- SHRQ $8, R10 -- -- INCL (DI)(R10*4) -- -- ADDQ $8, SI -- DECQ R8 -- JNZ loop_hist8 -- --hist1: -- ANDQ $7, R9 -- JZ end_hist -- XORQ R10, R10 -- --loop_hist1: -- MOVB (SI), R10 -- INCL (DI)(R10*4) -- INCQ SI -- DECQ R9 -- JNZ loop_hist1 -- --end_hist: -- RET -diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go -deleted file mode 100644 -index dcf43bd50a80e..0000000000000 ---- a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go -+++ /dev/null -@@ -1,35 +0,0 @@ --//+build !amd64 noasm appengine gccgo -- --// Copyright 2015, Klaus Post, see LICENSE for details. -- --package flate -- --func init() { -- useSSE42 = false --} -- --// crc32sse should never be called. --func crc32sse(a []byte) uint32 { -- panic(""no assembler"") --} -- --// crc32sseAll should never be called. --func crc32sseAll(a []byte, dst []uint32) { -- panic(""no assembler"") --} -- --// matchLenSSE4 should never be called. --func matchLenSSE4(a, b []byte, max int) int { -- panic(""no assembler"") -- return 0 --} -- --// histogram accumulates a histogram of b in h. --// --// len(h) must be >= 256, and h's elements must be all zeroes. 
--func histogram(b []byte, h []int32) { -- h = h[:256] -- for _, t := range b { -- h[t]++ -- } --} -diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go -index 6287951204e8d..20c94f5968439 100644 ---- a/vendor/github.com/klauspost/compress/flate/deflate.go -+++ b/vendor/github.com/klauspost/compress/flate/deflate.go -@@ -50,8 +50,6 @@ const ( - skipNever = math.MaxInt32 - ) - --var useSSE42 bool -- - type compressionLevel struct { - good, lazy, nice, chain, fastSkipHashing, level int - } -@@ -97,9 +95,8 @@ type advancedState struct { - hashOffset int - - // input window: unprocessed data is window[index:windowEnd] -- index int -- bulkHasher func([]byte, []uint32) -- hashMatch [maxMatchLength + minMatchLength]uint32 -+ index int -+ hashMatch [maxMatchLength + minMatchLength]uint32 - } - - type compressor struct { -@@ -120,7 +117,7 @@ type compressor struct { - - // queued output tokens - tokens tokens -- snap fastEnc -+ fast fastEnc - state *advancedState - } - -@@ -164,14 +161,14 @@ func (d *compressor) fillDeflate(b []byte) int { - return n - } - --func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { -+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - var window []byte - if d.blockStart <= index { - window = d.window[d.blockStart:index] - } - d.blockStart = index -- d.w.writeBlock(tok.tokens[:tok.n], eof, window) -+ d.w.writeBlock(tok, eof, window) - return d.w.err - } - return nil -@@ -180,20 +177,20 @@ func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { - // writeBlockSkip writes the current block and uses the number of tokens - // to determine if the block should be stored on no matches, or - // only huffman encoded. --func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { -+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - if d.blockStart <= index { - window := d.window[d.blockStart:index] - // If we removed less than a 64th of all literals - // we huffman compress the block. - if int(tok.n) > len(window)-int(tok.n>>6) { -- d.w.writeBlockHuff(eof, window) -+ d.w.writeBlockHuff(eof, window, d.sync) - } else { - // Write a dynamic huffman block. -- d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window) -+ d.w.writeBlockDynamic(tok, eof, window, d.sync) - } - } else { -- d.w.writeBlock(tok.tokens[:tok.n], eof, nil) -+ d.w.writeBlock(tok, eof, nil) - } - d.blockStart = index - return d.w.err -@@ -208,8 +205,16 @@ func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { - func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only mode, - // use constant or Snappy compression. 
-- switch d.compressionLevel.level { -- case 0, 1, 2: -+ if d.level == 0 { -+ return -+ } -+ if d.fast != nil { -+ // encode the last data, but discard the result -+ if len(b) > maxMatchOffset { -+ b = b[len(b)-maxMatchOffset:] -+ } -+ d.fast.Encode(&d.tokens, b) -+ d.tokens.Reset() - return - } - s := d.state -@@ -236,7 +241,7 @@ func (d *compressor) fillWindow(b []byte) { - } - - dst := s.hashMatch[:dstSize] -- s.bulkHasher(tocheck, dst) -+ bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex -@@ -284,62 +289,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { -- n := matchLen(win[i:], wPos, minMatchLook) -- -- if n > length && (n > minMatchLength || pos-i <= 4096) { -- length = n -- offset = pos - i -- ok = true -- if n >= nice { -- // The match is good enough that we don't try to find a better one. -- break -- } -- wEnd = win[pos+n] -- } -- } -- if i == minIndex { -- // hashPrev[i & windowMask] has already been overwritten, so stop now. -- break -- } -- i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset -- if i < minIndex || i < 0 { -- break -- } -- } -- return --} -- --// Try to find a match starting at index whose length is greater than prevSize. --// We only look at chainCount possibilities before giving up. --// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead --func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { -- minMatchLook := maxMatchLength -- if lookahead < minMatchLook { -- minMatchLook = lookahead -- } -- -- win := d.window[0 : pos+minMatchLook] -- -- // We quit when we get a match that's at least nice long -- nice := len(win) - pos -- if d.nice < nice { -- nice = d.nice -- } -- -- // If we've got a match that's good enough, only look in 1/4 the chain. -- tries := d.chain -- length = prevLength -- if length >= d.good { -- tries >>= 2 -- } -- -- wEnd := win[pos+length] -- wPos := win[pos:] -- minIndex := pos - windowSize -- -- for i := prevHead; tries > 0; tries-- { -- if wEnd == win[i+length] { -- n := matchLenSSE4(win[i:], wPos, minMatchLook) -+ n := matchLen(win[i:i+minMatchLook], wPos) - - if n > length && (n > minMatchLength || pos-i <= 4096) { - length = n -@@ -372,42 +322,27 @@ func (d *compressor) writeStoredBlock(buf []byte) error { - return d.w.err - } - --const hashmul = 0x1e35a7bd -- - // hash4 returns a hash representation of the first 4 bytes - // of the supplied slice. - // The caller must ensure that len(b) >= 4. - func hash4(b []byte) uint32 { -- return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) -+ b = b[:4] -+ return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits) - } - - // bulkHash4 will compute hashes using the same - // algorithm as hash4 - func bulkHash4(b []byte, dst []uint32) { -- if len(b) < minMatchLength { -+ if len(b) < 4 { - return - } - hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -- dst[0] = (hb * hashmul) >> (32 - hashBits) -- end := len(b) - minMatchLength + 1 -+ dst[0] = hash4u(hb, hashBits) -+ end := len(b) - 4 + 1 - for i := 1; i < end; i++ { - hb = (hb << 8) | uint32(b[i+3]) -- dst[i] = (hb * hashmul) >> (32 - hashBits) -- } --} -- --// matchLen returns the number of matching bytes in a and b --// up to length 'max'. 
Both slices must be at least 'max' --// bytes in size. --func matchLen(a, b []byte, max int) int { -- a = a[:max] -- b = b[:len(a)] -- for i, av := range a { -- if b[i] != av { -- return i -- } -+ dst[i] = hash4u(hb, hashBits) - } -- return max - } - - func (d *compressor) initDeflate() { -@@ -424,149 +359,6 @@ func (d *compressor) initDeflate() { - s.offset = 0 - s.hash = 0 - s.chainHead = -1 -- s.bulkHasher = bulkHash4 -- if useSSE42 { -- s.bulkHasher = crc32sseAll -- } --} -- --// Assumes that d.fastSkipHashing != skipNever, --// otherwise use deflateLazy --func (d *compressor) deflate() { -- s := d.state -- // Sanity enables additional runtime tests. -- // It's intended to be used during development -- // to supplement the currently ad-hoc unit tests. -- const sanity = false -- -- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { -- return -- } -- -- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) -- if s.index < s.maxInsertIndex { -- s.hash = hash4(d.window[s.index : s.index+minMatchLength]) -- } -- -- for { -- if sanity && s.index > d.windowEnd { -- panic(""index > windowEnd"") -- } -- lookahead := d.windowEnd - s.index -- if lookahead < minMatchLength+maxMatchLength { -- if !d.sync { -- return -- } -- if sanity && s.index > d.windowEnd { -- panic(""index > windowEnd"") -- } -- if lookahead == 0 { -- if d.tokens.n > 0 { -- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- return -- } -- } -- if s.index < s.maxInsertIndex { -- // Update the hash -- s.hash = hash4(d.window[s.index : s.index+minMatchLength]) -- ch := s.hashHead[s.hash&hashMask] -- s.chainHead = int(ch) -- s.hashPrev[s.index&windowMask] = ch -- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset) -- } -- s.length = minMatchLength - 1 -- s.offset = 0 -- minIndex := s.index - windowSize -- if minIndex < 0 { -- minIndex = 0 -- } -- -- if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 { -- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { -- s.length = newLength -- s.offset = newOffset -- } -- } -- if s.length >= minMatchLength { -- s.ii = 0 -- // There was a match at the previous step, and the current match is -- // not better. Output the previous match. -- // ""s.length-3"" should NOT be ""s.length-minMatchLength"", since the format always assume 3 -- d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize)) -- d.tokens.n++ -- // Insert in the hash table all strings up to the end of the match. -- // index and index-1 are already inserted. If there is not enough -- // lookahead, the last two strings are not inserted into the hash -- // table. -- if s.length <= d.fastSkipHashing { -- var newIndex int -- newIndex = s.index + s.length -- // Calculate missing hashes -- end := newIndex -- if end > s.maxInsertIndex { -- end = s.maxInsertIndex -- } -- end += minMatchLength - 1 -- startindex := s.index + 1 -- if startindex > s.maxInsertIndex { -- startindex = s.maxInsertIndex -- } -- tocheck := d.window[startindex:end] -- dstSize := len(tocheck) - minMatchLength + 1 -- if dstSize > 0 { -- dst := s.hashMatch[:dstSize] -- bulkHash4(tocheck, dst) -- var newH uint32 -- for i, val := range dst { -- di := i + startindex -- newH = val & hashMask -- // Get previous value with the same hash. -- // Our chain should point to the previous value. 
-- s.hashPrev[di&windowMask] = s.hashHead[newH] -- // Set the head of the hash chain to us. -- s.hashHead[newH] = uint32(di + s.hashOffset) -- } -- s.hash = newH -- } -- s.index = newIndex -- } else { -- // For matches this long, we don't bother inserting each individual -- // item into the table. -- s.index += s.length -- if s.index < s.maxInsertIndex { -- s.hash = hash4(d.window[s.index : s.index+minMatchLength]) -- } -- } -- if d.tokens.n == maxFlateBlockTokens { -- // The block includes the current character -- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- } else { -- s.ii++ -- end := s.index + int(s.ii>>uint(d.fastSkipHashing)) + 1 -- if end > d.windowEnd { -- end = d.windowEnd -- } -- for i := s.index; i < end; i++ { -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) -- d.tokens.n++ -- if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- } -- s.index = end -- } -- } - } - - // deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -@@ -603,15 +395,14 @@ func (d *compressor) deflateLazy() { - // Flush current output block if any. - if d.byteAvailable { - // There is still one pending token that needs to be flushed -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -+ d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - } - if d.tokens.n > 0 { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } -- d.tokens.n = 0 -+ d.tokens.Reset() - } - return - } -@@ -642,8 +433,7 @@ func (d *compressor) deflateLazy() { - if prevLength >= minMatchLength && s.length <= prevLength { - // There was a match at the previous step, and the current match is - // not better. Output the previous match. -- d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) -- d.tokens.n++ -+ d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough -@@ -684,10 +474,10 @@ func (d *compressor) deflateLazy() { - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } -- d.tokens.n = 0 -+ d.tokens.Reset() - } - } else { - // Reset, if we got a match this run. -@@ -697,13 +487,12 @@ func (d *compressor) deflateLazy() { - // We have a byte waiting. Emit it. 
- if d.byteAvailable { - s.ii++ -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -+ d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } -- d.tokens.n = 0 -+ d.tokens.Reset() - } - s.index++ - -@@ -716,343 +505,24 @@ func (d *compressor) deflateLazy() { - break - } - -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -+ d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } -- d.tokens.n = 0 -+ d.tokens.Reset() - } - s.index++ - } - // Flush last byte -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -+ d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength - if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } -- d.tokens.n = 0 -- } -- } -- } else { -- s.index++ -- d.byteAvailable = true -- } -- } -- } --} -- --// Assumes that d.fastSkipHashing != skipNever, --// otherwise use deflateLazySSE --func (d *compressor) deflateSSE() { -- s := d.state -- // Sanity enables additional runtime tests. -- // It's intended to be used during development -- // to supplement the currently ad-hoc unit tests. -- const sanity = false -- -- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { -- return -- } -- -- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) -- if s.index < s.maxInsertIndex { -- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask -- } -- -- for { -- if sanity && s.index > d.windowEnd { -- panic(""index > windowEnd"") -- } -- lookahead := d.windowEnd - s.index -- if lookahead < minMatchLength+maxMatchLength { -- if !d.sync { -- return -- } -- if sanity && s.index > d.windowEnd { -- panic(""index > windowEnd"") -- } -- if lookahead == 0 { -- if d.tokens.n > 0 { -- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- return -- } -- } -- if s.index < s.maxInsertIndex { -- // Update the hash -- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask -- ch := s.hashHead[s.hash] -- s.chainHead = int(ch) -- s.hashPrev[s.index&windowMask] = ch -- s.hashHead[s.hash] = uint32(s.index + s.hashOffset) -- } -- s.length = minMatchLength - 1 -- s.offset = 0 -- minIndex := s.index - windowSize -- if minIndex < 0 { -- minIndex = 0 -- } -- -- if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 { -- if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { -- s.length = newLength -- s.offset = newOffset -- } -- } -- if s.length >= minMatchLength { -- s.ii = 0 -- // There was a match at the previous step, and the current match is -- // not better. Output the previous match. 
-- // ""s.length-3"" should NOT be ""s.length-minMatchLength"", since the format always assume 3 -- d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize)) -- d.tokens.n++ -- // Insert in the hash table all strings up to the end of the match. -- // index and index-1 are already inserted. If there is not enough -- // lookahead, the last two strings are not inserted into the hash -- // table. -- if s.length <= d.fastSkipHashing { -- var newIndex int -- newIndex = s.index + s.length -- // Calculate missing hashes -- end := newIndex -- if end > s.maxInsertIndex { -- end = s.maxInsertIndex -- } -- end += minMatchLength - 1 -- startindex := s.index + 1 -- if startindex > s.maxInsertIndex { -- startindex = s.maxInsertIndex -- } -- tocheck := d.window[startindex:end] -- dstSize := len(tocheck) - minMatchLength + 1 -- if dstSize > 0 { -- dst := s.hashMatch[:dstSize] -- -- crc32sseAll(tocheck, dst) -- var newH uint32 -- for i, val := range dst { -- di := i + startindex -- newH = val & hashMask -- // Get previous value with the same hash. -- // Our chain should point to the previous value. -- s.hashPrev[di&windowMask] = s.hashHead[newH] -- // Set the head of the hash chain to us. -- s.hashHead[newH] = uint32(di + s.hashOffset) -- } -- s.hash = newH -- } -- s.index = newIndex -- } else { -- // For matches this long, we don't bother inserting each individual -- // item into the table. -- s.index += s.length -- if s.index < s.maxInsertIndex { -- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask -- } -- } -- if d.tokens.n == maxFlateBlockTokens { -- // The block includes the current character -- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- } else { -- s.ii++ -- end := s.index + int(s.ii>>5) + 1 -- if end > d.windowEnd { -- end = d.windowEnd -- } -- for i := s.index; i < end; i++ { -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) -- d.tokens.n++ -- if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- } -- s.index = end -- } -- } --} -- --// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, --// meaning it always has lazy matching on. --func (d *compressor) deflateLazySSE() { -- s := d.state -- // Sanity enables additional runtime tests. -- // It's intended to be used during development -- // to supplement the currently ad-hoc unit tests. -- const sanity = false -- -- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { -- return -- } -- -- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) -- if s.index < s.maxInsertIndex { -- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask -- } -- -- for { -- if sanity && s.index > d.windowEnd { -- panic(""index > windowEnd"") -- } -- lookahead := d.windowEnd - s.index -- if lookahead < minMatchLength+maxMatchLength { -- if !d.sync { -- return -- } -- if sanity && s.index > d.windowEnd { -- panic(""index > windowEnd"") -- } -- if lookahead == 0 { -- // Flush current output block if any. 
-- if d.byteAvailable { -- // There is still one pending token that needs to be flushed -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -- d.byteAvailable = false -- } -- if d.tokens.n > 0 { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- return -- } -- } -- if s.index < s.maxInsertIndex { -- // Update the hash -- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask -- ch := s.hashHead[s.hash] -- s.chainHead = int(ch) -- s.hashPrev[s.index&windowMask] = ch -- s.hashHead[s.hash] = uint32(s.index + s.hashOffset) -- } -- prevLength := s.length -- prevOffset := s.offset -- s.length = minMatchLength - 1 -- s.offset = 0 -- minIndex := s.index - windowSize -- if minIndex < 0 { -- minIndex = 0 -- } -- -- if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { -- if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { -- s.length = newLength -- s.offset = newOffset -- } -- } -- if prevLength >= minMatchLength && s.length <= prevLength { -- // There was a match at the previous step, and the current match is -- // not better. Output the previous match. -- d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) -- d.tokens.n++ -- -- // Insert in the hash table all strings up to the end of the match. -- // index and index-1 are already inserted. If there is not enough -- // lookahead, the last two strings are not inserted into the hash -- // table. -- var newIndex int -- newIndex = s.index + prevLength - 1 -- // Calculate missing hashes -- end := newIndex -- if end > s.maxInsertIndex { -- end = s.maxInsertIndex -- } -- end += minMatchLength - 1 -- startindex := s.index + 1 -- if startindex > s.maxInsertIndex { -- startindex = s.maxInsertIndex -- } -- tocheck := d.window[startindex:end] -- dstSize := len(tocheck) - minMatchLength + 1 -- if dstSize > 0 { -- dst := s.hashMatch[:dstSize] -- crc32sseAll(tocheck, dst) -- var newH uint32 -- for i, val := range dst { -- di := i + startindex -- newH = val & hashMask -- // Get previous value with the same hash. -- // Our chain should point to the previous value. -- s.hashPrev[di&windowMask] = s.hashHead[newH] -- // Set the head of the hash chain to us. -- s.hashHead[newH] = uint32(di + s.hashOffset) -- } -- s.hash = newH -- } -- -- s.index = newIndex -- d.byteAvailable = false -- s.length = minMatchLength - 1 -- if d.tokens.n == maxFlateBlockTokens { -- // The block includes the current character -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- } else { -- // Reset, if we got a match this run. -- if s.length >= minMatchLength { -- s.ii = 0 -- } -- // We have a byte waiting. Emit it. -- if d.byteAvailable { -- s.ii++ -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -- if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- s.index++ -- -- // If we have a long run of no matches, skip additional bytes -- // Resets when s.ii overflows after 64KB. 
-- if s.ii > 31 { -- n := int(s.ii >> 6) -- for j := 0; j < n; j++ { -- if s.index >= d.windowEnd-1 { -- break -- } -- -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -- if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -- } -- s.index++ -- } -- // Flush last byte -- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) -- d.tokens.n++ -- d.byteAvailable = false -- // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength -- if d.tokens.n == maxFlateBlockTokens { -- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { -- return -- } -- d.tokens.n = 0 -+ d.tokens.Reset() - } - } - } else { -@@ -1085,17 +555,17 @@ func (d *compressor) storeHuff() { - if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { - return - } -- d.w.writeBlockHuff(false, d.window[:d.windowEnd]) -+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - d.windowEnd = 0 - } - --// storeHuff will compress and store the currently added data, -+// storeFast will compress and store the currently added data, - // if enough has been accumulated or we at the end of the stream. - // Any error that occurred will be in d.err --func (d *compressor) storeSnappy() { -+func (d *compressor) storeFast() { - // We only compress if we have maxStoreBlockSize. -- if d.windowEnd < maxStoreBlockSize { -+ if d.windowEnd < len(d.window) { - if !d.sync { - return - } -@@ -1106,32 +576,30 @@ func (d *compressor) storeSnappy() { - } - if d.windowEnd <= 32 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) -- d.tokens.n = 0 -- d.windowEnd = 0 - } else { -- d.w.writeBlockHuff(false, d.window[:d.windowEnd]) -+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) - d.err = d.w.err - } -- d.tokens.n = 0 -+ d.tokens.Reset() - d.windowEnd = 0 -- d.snap.Reset() -+ d.fast.Reset() - return - } - } - -- d.snap.Encode(&d.tokens, d.window[:d.windowEnd]) -+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) - // If we made zero matches, store the block as is. -- if int(d.tokens.n) == d.windowEnd { -+ if d.tokens.n == 0 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - // If we removed less than 1/16th, huffman compress the block. 
- } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { -- d.w.writeBlockHuff(false, d.window[:d.windowEnd]) -+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } else { -- d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd]) -+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } -- d.tokens.n = 0 -+ d.tokens.Reset() - d.windowEnd = 0 - } - -@@ -1176,36 +644,26 @@ func (d *compressor) init(w io.Writer, level int) (err error) { - d.fill = (*compressor).fillBlock - d.step = (*compressor).store - case level == ConstantCompression: -+ d.w.logReusePenalty = uint(4) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeHuff -- case level >= 1 && level <= 4: -- d.snap = newFastEnc(level) -- d.window = make([]byte, maxStoreBlockSize) -- d.fill = (*compressor).fillBlock -- d.step = (*compressor).storeSnappy - case level == DefaultCompression: - level = 5 - fallthrough -- case 5 <= level && level <= 9: -+ case level >= 1 && level <= 6: -+ d.w.logReusePenalty = uint(level + 1) -+ d.fast = newFastEnc(level) -+ d.window = make([]byte, maxStoreBlockSize) -+ d.fill = (*compressor).fillBlock -+ d.step = (*compressor).storeFast -+ case 7 <= level && level <= 9: -+ d.w.logReusePenalty = uint(level) - d.state = &advancedState{} - d.compressionLevel = levels[level] - d.initDeflate() - d.fill = (*compressor).fillDeflate -- if d.fastSkipHashing == skipNever { -- if useSSE42 { -- d.step = (*compressor).deflateLazySSE -- } else { -- d.step = (*compressor).deflateLazy -- } -- } else { -- if useSSE42 { -- d.step = (*compressor).deflateSSE -- } else { -- d.step = (*compressor).deflate -- -- } -- } -+ d.step = (*compressor).deflateLazy - default: - return fmt.Errorf(""flate: invalid compression level %d: want value in range [-2, 9]"", level) - } -@@ -1218,10 +676,10 @@ func (d *compressor) reset(w io.Writer) { - d.sync = false - d.err = nil - // We only need to reset a few things for Snappy. -- if d.snap != nil { -- d.snap.Reset() -+ if d.fast != nil { -+ d.fast.Reset() - d.windowEnd = 0 -- d.tokens.n = 0 -+ d.tokens.Reset() - return - } - switch d.compressionLevel.chain { -@@ -1240,7 +698,7 @@ func (d *compressor) reset(w io.Writer) { - s.hashOffset = 1 - s.index, d.windowEnd = 0, 0 - d.blockStart, d.byteAvailable = 0, false -- d.tokens.n = 0 -+ d.tokens.Reset() - s.length = minMatchLength - 1 - s.offset = 0 - s.hash = 0 -diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go -new file mode 100644 -index 0000000000000..b0a470f92e0eb ---- /dev/null -+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go -@@ -0,0 +1,257 @@ -+// Copyright 2011 The Snappy-Go Authors. All rights reserved. -+// Modified for deflate by Klaus Post (c) 2015. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. 
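The rewritten init() above routes compression levels to strategies: level 0 stores uncompressed, ConstantCompression is Huffman-only, levels 1-6 now use the new fast encoders via storeFast, and 7-9 fall through to deflateLazy, each setting its own logReusePenalty. A rough, self-contained summary of that routing; strategyFor and the -1/-2 constant values are assumptions for illustration, not the library's identifiers.

package main

import "fmt"

// strategyFor loosely mirrors the level switch in the revised init().
func strategyFor(level int) string {
	switch {
	case level == 0:
		return "store uncompressed"
	case level == -2: // ConstantCompression / HuffmanOnly (assumed value)
		return "huffman-only (storeHuff)"
	case level == -1: // DefaultCompression is treated as level 5
		return strategyFor(5)
	case level >= 1 && level <= 6:
		return "fast encoder (storeFast)"
	case level >= 7 && level <= 9:
		return "lazy matching (deflateLazy)"
	default:
		return "invalid level"
	}
}

func main() {
	for _, l := range []int{-2, -1, 0, 1, 6, 7, 9} {
		fmt.Printf("level %2d -> %s\n", l, strategyFor(l))
	}
}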
-+ -+package flate -+ -+import ( -+ ""fmt"" -+ ""math/bits"" -+) -+ -+type fastEnc interface { -+ Encode(dst *tokens, src []byte) -+ Reset() -+} -+ -+func newFastEnc(level int) fastEnc { -+ switch level { -+ case 1: -+ return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} -+ case 2: -+ return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} -+ case 3: -+ return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} -+ case 4: -+ return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} -+ case 5: -+ return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} -+ case 6: -+ return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} -+ default: -+ panic(""invalid level specified"") -+ } -+} -+ -+const ( -+ tableBits = 16 // Bits used in the table -+ tableSize = 1 << tableBits // Size of the table -+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. -+ baseMatchOffset = 1 // The smallest match offset -+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 -+ maxMatchOffset = 1 << 15 // The largest match offset -+ -+ bTableBits = 18 // Bits used in the big tables -+ bTableSize = 1 << bTableBits // Size of the table -+ allocHistory = maxMatchOffset * 10 // Size to preallocate for history. -+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this. -+) -+ -+const ( -+ prime3bytes = 506832829 -+ prime4bytes = 2654435761 -+ prime5bytes = 889523592379 -+ prime6bytes = 227718039650203 -+ prime7bytes = 58295818150454627 -+ prime8bytes = 0xcf1bbcdcb7a56463 -+) -+ -+func load32(b []byte, i int) uint32 { -+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read. -+ b = b[i:] -+ b = b[:4] -+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -+} -+ -+func load64(b []byte, i int) uint64 { -+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read. -+ b = b[i:] -+ b = b[:8] -+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | -+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -+} -+ -+func load3232(b []byte, i int32) uint32 { -+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read. -+ b = b[i:] -+ b = b[:4] -+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -+} -+ -+func load6432(b []byte, i int32) uint64 { -+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read. -+ b = b[i:] -+ b = b[:8] -+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | -+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -+} -+ -+func hash(u uint32) uint32 { -+ return (u * 0x1e35a7bd) >> tableShift -+} -+ -+type tableEntry struct { -+ val uint32 -+ offset int32 -+} -+ -+// fastGen maintains the table for matches, -+// and the previous byte block for level 2. -+// This is the generic implementation. 
-+type fastGen struct { -+ hist []byte -+ cur int32 -+} -+ -+func (e *fastGen) addBlock(src []byte) int32 { -+ // check if we have space already -+ if len(e.hist)+len(src) > cap(e.hist) { -+ if cap(e.hist) == 0 { -+ e.hist = make([]byte, 0, allocHistory) -+ } else { -+ if cap(e.hist) < maxMatchOffset*2 { -+ panic(""unexpected buffer size"") -+ } -+ // Move down -+ offset := int32(len(e.hist)) - maxMatchOffset -+ copy(e.hist[0:maxMatchOffset], e.hist[offset:]) -+ e.cur += offset -+ e.hist = e.hist[:maxMatchOffset] -+ } -+ } -+ s := int32(len(e.hist)) -+ e.hist = append(e.hist, src...) -+ return s -+} -+ -+// hash4 returns the hash of u to fit in a hash table with h bits. -+// Preferably h should be a constant and should always be <32. -+func hash4u(u uint32, h uint8) uint32 { -+ return (u * prime4bytes) >> ((32 - h) & 31) -+} -+ -+type tableEntryPrev struct { -+ Cur tableEntry -+ Prev tableEntry -+} -+ -+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -+// Preferably h should be a constant and should always be <32. -+func hash4x64(u uint64, h uint8) uint32 { -+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31) -+} -+ -+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -+// Preferably h should be a constant and should always be <64. -+func hash7(u uint64, h uint8) uint32 { -+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) -+} -+ -+// hash8 returns the hash of u to fit in a hash table with h bits. -+// Preferably h should be a constant and should always be <64. -+func hash8(u uint64, h uint8) uint32 { -+ return uint32((u * prime8bytes) >> ((64 - h) & 63)) -+} -+ -+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -+// Preferably h should be a constant and should always be <64. -+func hash6(u uint64, h uint8) uint32 { -+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) -+} -+ -+// matchlen will return the match length between offsets and t in src. -+// The maximum length returned is maxMatchLength - 4. -+// It is assumed that s > t, that t >=0 and s < len(src). -+func (e *fastGen) matchlen(s, t int32, src []byte) int32 { -+ if debugDecode { -+ if t >= s { -+ panic(fmt.Sprint(""t >=s:"", t, s)) -+ } -+ if int(s) >= len(src) { -+ panic(fmt.Sprint(""s >= len(src):"", s, len(src))) -+ } -+ if t < 0 { -+ panic(fmt.Sprint(""t < 0:"", t)) -+ } -+ if s-t > maxMatchOffset { -+ panic(fmt.Sprint(s, ""-"", t, ""("", s-t, "") > maxMatchLength ("", maxMatchOffset, "")"")) -+ } -+ } -+ s1 := int(s) + maxMatchLength - 4 -+ if s1 > len(src) { -+ s1 = len(src) -+ } -+ -+ // Extend the match to be as long as possible. -+ return int32(matchLen(src[s:s1], src[t:])) -+} -+ -+// matchlenLong will return the match length between offsets and t in src. -+// It is assumed that s > t, that t >=0 and s < len(src). -+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { -+ if debugDecode { -+ if t >= s { -+ panic(fmt.Sprint(""t >=s:"", t, s)) -+ } -+ if int(s) >= len(src) { -+ panic(fmt.Sprint(""s >= len(src):"", s, len(src))) -+ } -+ if t < 0 { -+ panic(fmt.Sprint(""t < 0:"", t)) -+ } -+ if s-t > maxMatchOffset { -+ panic(fmt.Sprint(s, ""-"", t, ""("", s-t, "") > maxMatchLength ("", maxMatchOffset, "")"")) -+ } -+ } -+ // Extend the match to be as long as possible. -+ return int32(matchLen(src[s:], src[t:])) -+} -+ -+// Reset the encoding table. 
-+func (e *fastGen) Reset() { -+ if cap(e.hist) < int(maxMatchOffset*8) { -+ l := maxMatchOffset * 8 -+ // Make it at least 1MB. -+ if l < 1<<20 { -+ l = 1 << 20 -+ } -+ e.hist = make([]byte, 0, l) -+ } -+ // We offset current position so everything will be out of reach -+ e.cur += maxMatchOffset + int32(len(e.hist)) -+ e.hist = e.hist[:0] -+} -+ -+// matchLen returns the maximum length. -+// 'a' must be the shortest of the two. -+func matchLen(a, b []byte) int { -+ b = b[:len(a)] -+ var checked int -+ if len(a) > 4 { -+ // Try 4 bytes first -+ if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { -+ return bits.TrailingZeros32(diff) >> 3 -+ } -+ // Switch to 8 byte matching. -+ checked = 4 -+ a = a[4:] -+ b = b[4:] -+ for len(a) >= 8 { -+ b = b[:len(a)] -+ if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { -+ return checked + (bits.TrailingZeros64(diff) >> 3) -+ } -+ checked += 8 -+ a = a[8:] -+ b = b[8:] -+ } -+ } -+ b = b[:len(a)] -+ for i := range a { -+ if a[i] != b[i] { -+ return int(i) + checked -+ } -+ } -+ return len(a) + checked -+} -diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go -index f46c654189fc6..dd74ffb87232b 100644 ---- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go -+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go -@@ -85,26 +85,48 @@ type huffmanBitWriter struct { - // Data waiting to be written is bytes[0:nbytes] - // and then the low nbits of bits. - bits uint64 -- nbits uint -- bytes [256]byte -- codegenFreq [codegenCodeCount]int32 -+ nbits uint16 - nbytes uint8 -- literalFreq []int32 -- offsetFreq []int32 -- codegen []uint8 - literalEncoding *huffmanEncoder - offsetEncoding *huffmanEncoder - codegenEncoding *huffmanEncoder - err error -+ lastHeader int -+ // Set between 0 (reused block can be up to 2x the size) -+ logReusePenalty uint -+ lastHuffMan bool -+ bytes [256]byte -+ literalFreq [lengthCodesStart + 32]uint16 -+ offsetFreq [32]uint16 -+ codegenFreq [codegenCodeCount]uint16 -+ -+ // codegen must have an extra space for the final symbol. -+ codegen [literalCount + offsetCodeCount + 1]uint8 - } - -+// Huffman reuse. -+// -+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. -+// -+// This is controlled by several variables: -+// -+// If lastHeader is non-zero the Huffman table can be reused. -+// This also indicates that a Huffman table has been generated that can output all -+// possible symbols. -+// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated -+// an EOB with the previous table must be written. -+// -+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. -+// -+// An incoming block estimates the output size of a new table using a 'fresh' by calculating the -+// optimal size and adding a penalty in 'logReusePenalty'. -+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table -+// is slower both for compression and decompression. 
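The comment block above describes when a previously written Huffman table is reused rather than replaced: the estimated cost of a fresh table is inflated by logReusePenalty before being compared with the cost of reusing the existing one. A small stand-alone sketch of that comparison follows; shouldReuse is an illustrative stand-in for the writer's actual decision, and the bit counts are made up.

package main

import "fmt"

// shouldReuse mimics the size comparison sketched in the comment above.
func shouldReuse(headerBits, estBodyBits, reuseBits int, logReusePenalty uint) bool {
	// Cost of a fresh table: header plus an (optimistic) body estimate,
	// inflated because a real table is never optimal and building one
	// costs time on both the compression and decompression side.
	newSize := headerBits + estBodyBits
	newSize += newSize >> logReusePenalty
	// Reuse unless the fresh table is estimated to be strictly smaller.
	return reuseBits <= newSize
}

func main() {
	// A larger shift adds a smaller penalty, so new tables are adopted
	// more readily; a smaller shift biases the writer towards reuse.
	fmt.Println(shouldReuse(120, 9000, 9500, 4)) // true: keep the old table
	fmt.Println(shouldReuse(120, 6000, 9500, 4)) // false: emit a new table
}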
-+ - func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { - return &huffmanBitWriter{ - writer: w, -- literalFreq: make([]int32, lengthCodesStart+32), -- offsetFreq: make([]int32, 32), -- codegen: make([]uint8, maxNumLit+offsetCodeCount+1), -- literalEncoding: newHuffmanEncoder(maxNumLit), -+ literalEncoding: newHuffmanEncoder(literalCount), - codegenEncoding: newHuffmanEncoder(codegenCodeCount), - offsetEncoding: newHuffmanEncoder(offsetCodeCount), - } -@@ -113,7 +135,41 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { - func (w *huffmanBitWriter) reset(writer io.Writer) { - w.writer = writer - w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil -- w.bytes = [256]byte{} -+ w.lastHeader = 0 -+ w.lastHuffMan = false -+} -+ -+func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) { -+ offsets, lits = true, true -+ a := t.offHist[:offsetCodeCount] -+ b := w.offsetFreq[:len(a)] -+ for i := range a { -+ if b[i] == 0 && a[i] != 0 { -+ offsets = false -+ break -+ } -+ } -+ -+ a = t.extraHist[:literalCount-256] -+ b = w.literalFreq[256:literalCount] -+ b = b[:len(a)] -+ for i := range a { -+ if b[i] == 0 && a[i] != 0 { -+ lits = false -+ break -+ } -+ } -+ if lits { -+ a = t.litHist[:] -+ b = w.literalFreq[:len(a)] -+ for i := range a { -+ if b[i] == 0 && a[i] != 0 { -+ lits = false -+ break -+ } -+ } -+ } -+ return - } - - func (w *huffmanBitWriter) flush() { -@@ -144,30 +200,11 @@ func (w *huffmanBitWriter) write(b []byte) { - _, w.err = w.writer.Write(b) - } - --func (w *huffmanBitWriter) writeBits(b int32, nb uint) { -- w.bits |= uint64(b) << w.nbits -+func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { -+ w.bits |= uint64(b) << (w.nbits & 63) - w.nbits += nb - if w.nbits >= 48 { -- bits := w.bits -- w.bits >>= 48 -- w.nbits -= 48 -- n := w.nbytes -- w.bytes[n] = byte(bits) -- w.bytes[n+1] = byte(bits >> 8) -- w.bytes[n+2] = byte(bits >> 16) -- w.bytes[n+3] = byte(bits >> 24) -- w.bytes[n+4] = byte(bits >> 32) -- w.bytes[n+5] = byte(bits >> 40) -- n += 6 -- if n >= bufferFlushSize { -- if w.err != nil { -- n = 0 -- return -- } -- w.write(w.bytes[:n]) -- n = 0 -- } -- w.nbytes = n -+ w.writeOutBits() - } - } - -@@ -213,7 +250,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE - // a copy of the frequencies, and as the place where we put the result. - // This is fine because the output is always shorter than the input used - // so far. -- codegen := w.codegen // cache -+ codegen := w.codegen[:] // cache - // Copy the concatenated code sizes to codegen. Put a marker at the end. - cgnl := codegen[:numLiterals] - for i := range cgnl { -@@ -292,30 +329,54 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE - codegen[outIndex] = badCode - } - --// dynamicSize returns the size of dynamically encoded data in bits. 
--func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { -+func (w *huffmanBitWriter) codegens() int { -+ numCodegens := len(w.codegenFreq) -+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { -+ numCodegens-- -+ } -+ return numCodegens -+} -+ -+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { - numCodegens = len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } -- header := 3 + 5 + 5 + 4 + (3 * numCodegens) + -+ return 3 + 5 + 5 + 4 + (3 * numCodegens) + - w.codegenEncoding.bitLength(w.codegenFreq[:]) + - int(w.codegenFreq[16])*2 + - int(w.codegenFreq[17])*3 + -- int(w.codegenFreq[18])*7 -+ int(w.codegenFreq[18])*7, numCodegens -+} -+ -+// dynamicSize returns the size of dynamically encoded data in bits. -+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { -+ header, numCodegens := w.headerSize() - size = header + -- litEnc.bitLength(w.literalFreq) + -- offEnc.bitLength(w.offsetFreq) + -+ litEnc.bitLength(w.literalFreq[:]) + -+ offEnc.bitLength(w.offsetFreq[:]) + - extraBits -- - return size, numCodegens - } - -+// extraBitSize will return the number of bits that will be written -+// as ""extra"" bits on matches. -+func (w *huffmanBitWriter) extraBitSize() int { -+ total := 0 -+ for i, n := range w.literalFreq[257:literalCount] { -+ total += int(n) * int(lengthExtraBits[i&31]) -+ } -+ for i, n := range w.offsetFreq[:offsetCodeCount] { -+ total += int(n) * int(offsetExtraBits[i&31]) -+ } -+ return total -+} -+ - // fixedSize returns the size of dynamically encoded data in bits. - func (w *huffmanBitWriter) fixedSize(extraBits int) int { - return 3 + -- fixedLiteralEncoding.bitLength(w.literalFreq) + -- fixedOffsetEncoding.bitLength(w.offsetFreq) + -+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) + -+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + - extraBits - } - -@@ -333,30 +394,36 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { - } - - func (w *huffmanBitWriter) writeCode(c hcode) { -+ // The function does not get inlined if we ""& 63"" the shift. - w.bits |= uint64(c.code) << w.nbits -- w.nbits += uint(c.len) -+ w.nbits += c.len - if w.nbits >= 48 { -- bits := w.bits -- w.bits >>= 48 -- w.nbits -= 48 -- n := w.nbytes -- w.bytes[n] = byte(bits) -- w.bytes[n+1] = byte(bits >> 8) -- w.bytes[n+2] = byte(bits >> 16) -- w.bytes[n+3] = byte(bits >> 24) -- w.bytes[n+4] = byte(bits >> 32) -- w.bytes[n+5] = byte(bits >> 40) -- n += 6 -- if n >= bufferFlushSize { -- if w.err != nil { -- n = 0 -- return -- } -- w.write(w.bytes[:n]) -+ w.writeOutBits() -+ } -+} -+ -+// writeOutBits will write bits to the buffer. -+func (w *huffmanBitWriter) writeOutBits() { -+ bits := w.bits -+ w.bits >>= 48 -+ w.nbits -= 48 -+ n := w.nbytes -+ w.bytes[n] = byte(bits) -+ w.bytes[n+1] = byte(bits >> 8) -+ w.bytes[n+2] = byte(bits >> 16) -+ w.bytes[n+3] = byte(bits >> 24) -+ w.bytes[n+4] = byte(bits >> 32) -+ w.bytes[n+5] = byte(bits >> 40) -+ n += 6 -+ if n >= bufferFlushSize { -+ if w.err != nil { - n = 0 -+ return - } -- w.nbytes = n -+ w.write(w.bytes[:n]) -+ n = 0 - } -+ w.nbytes = n - } - - // Write the header of a dynamic Huffman block to the output stream. 
-@@ -395,15 +462,12 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n - case 16: - w.writeBits(int32(w.codegen[i]), 2) - i++ -- break - case 17: - w.writeBits(int32(w.codegen[i]), 3) - i++ -- break - case 18: - w.writeBits(int32(w.codegen[i]), 7) - i++ -- break - } - } - } -@@ -412,6 +476,11 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { - if w.err != nil { - return - } -+ if w.lastHeader > 0 { -+ // We owe an EOB -+ w.writeCode(w.literalEncoding.codes[endBlockMarker]) -+ w.lastHeader = 0 -+ } - var flag int32 - if isEof { - flag = 1 -@@ -426,6 +495,12 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { - if w.err != nil { - return - } -+ if w.lastHeader > 0 { -+ // We owe an EOB -+ w.writeCode(w.literalEncoding.codes[endBlockMarker]) -+ w.lastHeader = 0 -+ } -+ - // Indicate that we are a fixed Huffman block - var value int32 = 2 - if isEof { -@@ -439,29 +514,23 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { - // is larger than the original bytes, the data will be written as a - // stored block. - // If the input is nil, the tokens will always be Huffman encoded. --func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { -+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { - if w.err != nil { - return - } - -- tokens = append(tokens, endBlockMarker) -- numLiterals, numOffsets := w.indexTokens(tokens) -- -+ tokens.AddEOB() -+ if w.lastHeader > 0 { -+ // We owe an EOB -+ w.writeCode(w.literalEncoding.codes[endBlockMarker]) -+ w.lastHeader = 0 -+ } -+ numLiterals, numOffsets := w.indexTokens(tokens, false) -+ w.generate(tokens) - var extraBits int - storedSize, storable := w.storedSize(input) - if storable { -- // We only bother calculating the costs of the extra bits required by -- // the length of offset fields (which will be the same for both fixed -- // and dynamic encoding), if we need to compare those two encodings -- // against stored encoding. -- for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { -- // First eight length codes have extra size = 0. -- extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) -- } -- for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { -- // First four offset codes have extra size = 0. -- extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode&63]) -- } -+ extraBits = w.extraBitSize() - } - - // Figure out smallest code. -@@ -500,7 +569,7 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { - } - - // Write the tokens. -- w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) -+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) - } - - // writeBlockDynamic encodes a block using a dynamic Huffman table. -@@ -508,72 +577,103 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { - // histogram distribution. - // If input is supplied and the compression savings are below 1/16th of the - // input size the block is stored. 
--func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { -+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - -- tokens = append(tokens, endBlockMarker) -- numLiterals, numOffsets := w.indexTokens(tokens) -+ sync = sync || eof -+ if sync { -+ tokens.AddEOB() -+ } - -- // Generate codegen and codegenFrequencies, which indicates how to encode -- // the literalEncoding and the offsetEncoding. -- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) -- w.codegenEncoding.generate(w.codegenFreq[:], 7) -- size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) -+ // We cannot reuse pure huffman table. -+ if w.lastHuffMan && w.lastHeader > 0 { -+ // We will not try to reuse. -+ w.writeCode(w.literalEncoding.codes[endBlockMarker]) -+ w.lastHeader = 0 -+ w.lastHuffMan = false -+ } -+ if !sync { -+ tokens.Fill() -+ } -+ numLiterals, numOffsets := w.indexTokens(tokens, !sync) - -- // Store bytes, if we don't get a reasonable improvement. -- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { -- w.writeStoredHeader(len(input), eof) -- w.writeBytes(input) -- return -+ var size int -+ // Check if we should reuse. -+ if w.lastHeader > 0 { -+ // Estimate size for using a new table -+ newSize := w.lastHeader + tokens.EstimatedBits() -+ -+ // The estimated size is calculated as an optimal table. -+ // We add a penalty to make it more realistic and re-use a bit more. -+ newSize += newSize >> (w.logReusePenalty & 31) -+ extra := w.extraBitSize() -+ reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra) -+ -+ // Check if a new table is better. -+ if newSize < reuseSize { -+ // Write the EOB we owe. -+ w.writeCode(w.literalEncoding.codes[endBlockMarker]) -+ size = newSize -+ w.lastHeader = 0 -+ } else { -+ size = reuseSize -+ } -+ // Check if we get a reasonable size decrease. -+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { -+ w.writeStoredHeader(len(input), eof) -+ w.writeBytes(input) -+ w.lastHeader = 0 -+ return -+ } - } - -- // Write Huffman table. -- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) -+ // We want a new block/table -+ if w.lastHeader == 0 { -+ w.generate(tokens) -+ // Generate codegen and codegenFrequencies, which indicates how to encode -+ // the literalEncoding and the offsetEncoding. -+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) -+ w.codegenEncoding.generate(w.codegenFreq[:], 7) -+ var numCodegens int -+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize()) -+ // Store bytes, if we don't get a reasonable improvement. -+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { -+ w.writeStoredHeader(len(input), eof) -+ w.writeBytes(input) -+ w.lastHeader = 0 -+ return -+ } -+ -+ // Write Huffman table. -+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) -+ w.lastHeader, _ = w.headerSize() -+ w.lastHuffMan = false -+ } - -+ if sync { -+ w.lastHeader = 0 -+ } - // Write the tokens. -- w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) -+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) - } - - // indexTokens indexes a slice of tokens, and updates - // literalFreq and offsetFreq, and generates literalEncoding - // and offsetEncoding. 
- // The number of literal and offset tokens is returned. --func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { -- for i := range w.literalFreq { -- w.literalFreq[i] = 0 -- } -- for i := range w.offsetFreq { -- w.offsetFreq[i] = 0 -- } -+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { -+ copy(w.literalFreq[:], t.litHist[:]) -+ copy(w.literalFreq[256:], t.extraHist[:]) -+ copy(w.offsetFreq[:], t.offHist[:offsetCodeCount]) - -- if len(tokens) == 0 { -+ if t.n == 0 { - return - } -- -- // Only last token should be endBlockMarker. -- if tokens[len(tokens)-1] == endBlockMarker { -- w.literalFreq[endBlockMarker]++ -- tokens = tokens[:len(tokens)-1] -+ if filled { -+ return maxNumLit, maxNumDist - } -- -- // Create slices up to the next power of two to avoid bounds checks. -- lits := w.literalFreq[:256] -- offs := w.offsetFreq[:32] -- lengths := w.literalFreq[lengthCodesStart:] -- lengths = lengths[:32] -- for _, t := range tokens { -- if t < endBlockMarker { -- lits[t.literal()]++ -- continue -- } -- length := t.length() -- offset := t.offset() -- lengths[lengthCode(length)&31]++ -- offs[offsetCode(offset)&31]++ -- } -- - // get the number of literals - numLiterals = len(w.literalFreq) - for w.literalFreq[numLiterals-1] == 0 { -@@ -590,11 +690,14 @@ func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets - w.offsetFreq[0] = 1 - numOffsets = 1 - } -- w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15) -- w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) - return - } - -+func (w *huffmanBitWriter) generate(t *tokens) { -+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15) -+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) -+} -+ - // writeTokens writes a slice of tokens to the output. - // codes for literal and offset encoding must be supplied. 
- func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { -@@ -626,8 +729,19 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) - // Write the length - length := t.length() - lengthCode := lengthCode(length) -- w.writeCode(lengths[lengthCode&31]) -- extraLengthBits := uint(lengthExtraBits[lengthCode&31]) -+ if false { -+ w.writeCode(lengths[lengthCode&31]) -+ } else { -+ // inlined -+ c := lengths[lengthCode&31] -+ w.bits |= uint64(c.code) << (w.nbits & 63) -+ w.nbits += c.len -+ if w.nbits >= 48 { -+ w.writeOutBits() -+ } -+ } -+ -+ extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { - extraLength := int32(length - lengthBase[lengthCode&31]) - w.writeBits(extraLength, extraLengthBits) -@@ -635,8 +749,18 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) - // Write the offset - offset := t.offset() - offsetCode := offsetCode(offset) -- w.writeCode(offs[offsetCode&31]) -- extraOffsetBits := uint(offsetExtraBits[offsetCode&63]) -+ if false { -+ w.writeCode(offs[offsetCode&31]) -+ } else { -+ // inlined -+ c := offs[offsetCode&31] -+ w.bits |= uint64(c.code) << (w.nbits & 63) -+ w.nbits += c.len -+ if w.nbits >= 48 { -+ w.writeOutBits() -+ } -+ } -+ extraOffsetBits := uint16(offsetExtraBits[offsetCode&63]) - if extraOffsetBits > 0 { - extraOffset := int32(offset - offsetBase[offsetCode&63]) - w.writeBits(extraOffset, extraOffsetBits) -@@ -661,75 +785,93 @@ func init() { - // writeBlockHuff encodes a block of bytes as either - // Huffman encoded literals or uncompressed bytes if the - // results only gains very little from compression. --func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { -+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - // Clear histogram -- for i := range w.literalFreq { -+ for i := range w.literalFreq[:] { - w.literalFreq[i] = 0 - } -+ if !w.lastHuffMan { -+ for i := range w.offsetFreq[:] { -+ w.offsetFreq[i] = 0 -+ } -+ } - - // Add everything as literals -- histogram(input, w.literalFreq) -+ estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15 - -- w.literalFreq[endBlockMarker] = 1 -+ // Store bytes, if we don't get a reasonable improvement. -+ ssize, storable := w.storedSize(input) -+ if storable && ssize < (estBits+estBits>>4) { -+ w.writeStoredHeader(len(input), eof) -+ w.writeBytes(input) -+ return -+ } - -- const numLiterals = endBlockMarker + 1 -- const numOffsets = 1 -+ if w.lastHeader > 0 { -+ size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader) -+ estBits += estBits >> (w.logReusePenalty) - -- w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15) -+ if estBits < size { -+ // We owe an EOB -+ w.writeCode(w.literalEncoding.codes[endBlockMarker]) -+ w.lastHeader = 0 -+ } -+ } - -- // Figure out smallest code. -- // Always use dynamic Huffman or Store -- var numCodegens int -+ const numLiterals = endBlockMarker + 1 -+ const numOffsets = 1 -+ if w.lastHeader == 0 { -+ w.literalFreq[endBlockMarker] = 1 -+ w.literalEncoding.generate(w.literalFreq[:numLiterals], 15) - -- // Generate codegen and codegenFrequencies, which indicates how to encode -- // the literalEncoding and the offsetEncoding. 
-- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) -- w.codegenEncoding.generate(w.codegenFreq[:], 7) -- size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) -+ // Generate codegen and codegenFrequencies, which indicates how to encode -+ // the literalEncoding and the offsetEncoding. -+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) -+ w.codegenEncoding.generate(w.codegenFreq[:], 7) -+ numCodegens := w.codegens() - -- // Store bytes, if we don't get a reasonable improvement. -- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { -- w.writeStoredHeader(len(input), eof) -- w.writeBytes(input) -- return -+ // Huffman. -+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) -+ w.lastHuffMan = true -+ w.lastHeader, _ = w.headerSize() - } - -- // Huffman. -- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - encoding := w.literalEncoding.codes[:257] -- n := w.nbytes - for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] -- w.bits |= uint64(c.code) << w.nbits -- w.nbits += uint(c.len) -- if w.nbits < 48 { -- continue -- } -- // Store 6 bytes -- bits := w.bits -- w.bits >>= 48 -- w.nbits -= 48 -- w.bytes[n] = byte(bits) -- w.bytes[n+1] = byte(bits >> 8) -- w.bytes[n+2] = byte(bits >> 16) -- w.bytes[n+3] = byte(bits >> 24) -- w.bytes[n+4] = byte(bits >> 32) -- w.bytes[n+5] = byte(bits >> 40) -- n += 6 -- if n < bufferFlushSize { -- continue -- } -- w.write(w.bytes[:n]) -- if w.err != nil { -- return // Return early in the event of write failures -+ w.bits |= uint64(c.code) << ((w.nbits) & 63) -+ w.nbits += c.len -+ if w.nbits >= 48 { -+ bits := w.bits -+ w.bits >>= 48 -+ w.nbits -= 48 -+ n := w.nbytes -+ w.bytes[n] = byte(bits) -+ w.bytes[n+1] = byte(bits >> 8) -+ w.bytes[n+2] = byte(bits >> 16) -+ w.bytes[n+3] = byte(bits >> 24) -+ w.bytes[n+4] = byte(bits >> 32) -+ w.bytes[n+5] = byte(bits >> 40) -+ n += 6 -+ if n >= bufferFlushSize { -+ if w.err != nil { -+ n = 0 -+ return -+ } -+ w.write(w.bytes[:n]) -+ n = 0 -+ } -+ w.nbytes = n - } -- n = 0 - } -- w.nbytes = n -- w.writeCode(encoding[endBlockMarker]) -+ if eof || sync { -+ w.writeCode(encoding[endBlockMarker]) -+ w.lastHeader = 0 -+ w.lastHuffMan = false -+ } - } -diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go -index f65f793361480..1810c6898d0b6 100644 ---- a/vendor/github.com/klauspost/compress/flate/huffman_code.go -+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go -@@ -10,6 +10,12 @@ import ( - ""sort"" - ) - -+const ( -+ maxBitsLimit = 16 -+ // number of valid literals -+ literalCount = 286 -+) -+ - // hcode is a huffman code with a bit code and bit length. - type hcode struct { - code, len uint16 -@@ -25,7 +31,7 @@ type huffmanEncoder struct { - - type literalNode struct { - literal uint16 -- freq int32 -+ freq uint16 - } - - // A levelInfo describes the state of the constructed tree for a given depth. -@@ -54,7 +60,11 @@ func (h *hcode) set(code uint16, length uint16) { - h.code = code - } - --func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } -+func reverseBits(number uint16, bitLength byte) uint16 { -+ return bits.Reverse16(number << ((16 - bitLength) & 15)) -+} -+ -+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } - - func newHuffmanEncoder(size int) *huffmanEncoder { - // Make capacity to next power of two. 
-@@ -64,10 +74,10 @@ func newHuffmanEncoder(size int) *huffmanEncoder { - - // Generates a HuffmanCode corresponding to the fixed literal table - func generateFixedLiteralEncoding() *huffmanEncoder { -- h := newHuffmanEncoder(maxNumLit) -+ h := newHuffmanEncoder(literalCount) - codes := h.codes - var ch uint16 -- for ch = 0; ch < maxNumLit; ch++ { -+ for ch = 0; ch < literalCount; ch++ { - var bits uint16 - var size uint16 - switch { -@@ -75,17 +85,14 @@ func generateFixedLiteralEncoding() *huffmanEncoder { - // size 8, 000110000 .. 10111111 - bits = ch + 48 - size = 8 -- break - case ch < 256: - // size 9, 110010000 .. 111111111 - bits = ch + 400 - 144 - size = 9 -- break - case ch < 280: - // size 7, 0000000 .. 0010111 - bits = ch - 256 - size = 7 -- break - default: - // size 8, 11000000 .. 11000111 - bits = ch + 192 - 280 -@@ -108,7 +115,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder { - var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() - var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() - --func (h *huffmanEncoder) bitLength(freq []int32) int { -+func (h *huffmanEncoder) bitLength(freq []uint16) int { - var total int - for i, f := range freq { - if f != 0 { -@@ -118,8 +125,6 @@ func (h *huffmanEncoder) bitLength(freq []int32) int { - return total - } - --const maxBitsLimit = 16 -- - // Return the number of literals assigned to each bit size in the Huffman encoding - // - // This method is only called when list.length >= 3 -@@ -163,9 +168,9 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { - // We initialize the levels as if we had already figured this out. - levels[level] = levelInfo{ - level: level, -- lastFreq: list[1].freq, -- nextCharFreq: list[2].freq, -- nextPairFreq: list[0].freq + list[1].freq, -+ lastFreq: int32(list[1].freq), -+ nextCharFreq: int32(list[2].freq), -+ nextPairFreq: int32(list[0].freq) + int32(list[1].freq), - } - leafCounts[level][level] = 2 - if level == 1 { -@@ -197,7 +202,12 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { - l.lastFreq = l.nextCharFreq - // Lower leafCounts are the same of the previous node. - leafCounts[level][level] = n -- l.nextCharFreq = list[n].freq -+ e := list[n] -+ if e.literal < math.MaxUint16 { -+ l.nextCharFreq = int32(e.freq) -+ } else { -+ l.nextCharFreq = math.MaxInt32 -+ } - } else { - // The next item on this row is a pair from the previous row. - // nextPairFreq isn't valid until we generate two -@@ -273,12 +283,12 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN - // - // freq An array of frequencies, in which frequency[i] gives the frequency of literal i. - // maxBits The maximum number of bits to use for any literal. --func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { -+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { - if h.freqcache == nil { - // Allocate a reusable buffer with the longest possible frequency table. -- // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit. -- // The largest of these is maxNumLit, so we allocate for that case. -- h.freqcache = make([]literalNode, maxNumLit+1) -+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. -+ // The largest of these is literalCount, so we allocate for that case. 
-+ h.freqcache = make([]literalNode, literalCount+1) - } - list := h.freqcache[:len(freq)+1] - // Number of non-zero literals -@@ -345,3 +355,27 @@ func (s byFreq) Less(i, j int) bool { - } - - func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -+ -+// histogramSize accumulates a histogram of b in h. -+// An estimated size in bits is returned. -+// Unassigned values are assigned '1' in the histogram. -+// len(h) must be >= 256, and h's elements must be all zeroes. -+func histogramSize(b []byte, h []uint16, fill bool) int { -+ h = h[:256] -+ for _, t := range b { -+ h[t]++ -+ } -+ invTotal := 1.0 / float64(len(b)) -+ shannon := 0.0 -+ single := math.Ceil(-math.Log2(invTotal)) -+ for i, v := range h[:] { -+ if v > 0 { -+ n := float64(v) -+ shannon += math.Ceil(-math.Log2(n*invTotal) * n) -+ } else if fill { -+ shannon += single -+ h[i] = 1 -+ } -+ } -+ return int(shannon + 0.99) -+} -diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go -index 800d0ce9e5452..6dc5b5d06e303 100644 ---- a/vendor/github.com/klauspost/compress/flate/inflate.go -+++ b/vendor/github.com/klauspost/compress/flate/inflate.go -@@ -9,6 +9,7 @@ package flate - - import ( - ""bufio"" -+ ""fmt"" - ""io"" - ""math/bits"" - ""strconv"" -@@ -24,6 +25,8 @@ const ( - maxNumLit = 286 - maxNumDist = 30 - numCodes = 19 // number of codes in Huffman meta-code -+ -+ debugDecode = false - ) - - // Initialize the fixedHuffmanDecoder only once upon first use. -@@ -104,8 +107,8 @@ const ( - - type huffmanDecoder struct { - min int // the minimum code length -- chunks *[huffmanNumChunks]uint32 // chunks as described above -- links [][]uint32 // overflow links -+ chunks *[huffmanNumChunks]uint16 // chunks as described above -+ links [][]uint16 // overflow links - linkMask uint32 // mask the width of the link table - } - -@@ -121,7 +124,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { - const sanity = false - - if h.chunks == nil { -- h.chunks = &[huffmanNumChunks]uint32{} -+ h.chunks = &[huffmanNumChunks]uint16{} - } - if h.min != 0 { - *h = huffmanDecoder{chunks: h.chunks, links: h.links} -@@ -169,6 +172,9 @@ func (h *huffmanDecoder) init(lengths []int) bool { - // accept degenerate single-code codings. See also - // TestDegenerateHuffmanCoding. - if code != 1<> 1 - if cap(h.links) < huffmanNumChunks-link { -- h.links = make([][]uint32, huffmanNumChunks-link) -+ h.links = make([][]uint16, huffmanNumChunks-link) - } else { - h.links = h.links[:huffmanNumChunks-link] - } -@@ -196,9 +202,9 @@ func (h *huffmanDecoder) init(lengths []int) bool { - if sanity && h.chunks[reverse] != 0 { - panic(""impossible: overwriting existing chunk"") - } -- h.chunks[reverse] = uint32(off<>= uint(16 - n) - if n <= huffmanChunkBits { -@@ -347,6 +353,9 @@ func (f *decompressor) nextBlock() { - f.huffmanBlock() - default: - // 3 is reserved. 
-+ if debugDecode { -+ fmt.Println(""reserved data block encountered"") -+ } - f.err = CorruptInputError(f.roffset) - } - } -@@ -425,11 +434,17 @@ func (f *decompressor) readHuffman() error { - } - nlit := int(f.b&0x1F) + 257 - if nlit > maxNumLit { -+ if debugDecode { -+ fmt.Println(""nlit > maxNumLit"", nlit) -+ } - return CorruptInputError(f.roffset) - } - f.b >>= 5 - ndist := int(f.b&0x1F) + 1 - if ndist > maxNumDist { -+ if debugDecode { -+ fmt.Println(""ndist > maxNumDist"", ndist) -+ } - return CorruptInputError(f.roffset) - } - f.b >>= 5 -@@ -453,6 +468,9 @@ func (f *decompressor) readHuffman() error { - f.codebits[codeOrder[i]] = 0 - } - if !f.h1.init(f.codebits[0:]) { -+ if debugDecode { -+ fmt.Println(""init codebits failed"") -+ } - return CorruptInputError(f.roffset) - } - -@@ -480,6 +498,9 @@ func (f *decompressor) readHuffman() error { - rep = 3 - nb = 2 - if i == 0 { -+ if debugDecode { -+ fmt.Println(""i==0"") -+ } - return CorruptInputError(f.roffset) - } - b = f.bits[i-1] -@@ -494,6 +515,9 @@ func (f *decompressor) readHuffman() error { - } - for f.nb < nb { - if err := f.moreBits(); err != nil { -+ if debugDecode { -+ fmt.Println(""morebits:"", err) -+ } - return err - } - } -@@ -501,6 +525,9 @@ func (f *decompressor) readHuffman() error { - f.b >>= nb - f.nb -= nb - if i+rep > n { -+ if debugDecode { -+ fmt.Println(""i+rep > n"", i, rep, n) -+ } - return CorruptInputError(f.roffset) - } - for j := 0; j < rep; j++ { -@@ -510,6 +537,9 @@ func (f *decompressor) readHuffman() error { - } - - if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { -+ if debugDecode { -+ fmt.Println(""init2 failed"") -+ } - return CorruptInputError(f.roffset) - } - -@@ -587,12 +617,18 @@ readLiteral: - length = 258 - n = 0 - default: -+ if debugDecode { -+ fmt.Println(v, "">= maxNumLit"") -+ } - f.err = CorruptInputError(f.roffset) - return - } - if n > 0 { - for f.nb < n { - if err = f.moreBits(); err != nil { -+ if debugDecode { -+ fmt.Println(""morebits n>0:"", err) -+ } - f.err = err - return - } -@@ -606,6 +642,9 @@ readLiteral: - if f.hd == nil { - for f.nb < 5 { - if err = f.moreBits(); err != nil { -+ if debugDecode { -+ fmt.Println(""morebits f.nb<5:"", err) -+ } - f.err = err - return - } -@@ -615,6 +654,9 @@ readLiteral: - f.nb -= 5 - } else { - if dist, err = f.huffSym(f.hd); err != nil { -+ if debugDecode { -+ fmt.Println(""huffsym:"", err) -+ } - f.err = err - return - } -@@ -629,6 +671,9 @@ readLiteral: - extra := (dist & 1) << nb - for f.nb < nb { - if err = f.moreBits(); err != nil { -+ if debugDecode { -+ fmt.Println(""morebits f.nb f.dict.histSize() { -+ if debugDecode { -+ fmt.Println(""dist > f.dict.histSize():"", dist, f.dict.histSize()) -+ } - f.err = CorruptInputError(f.roffset) - return - } -@@ -688,6 +739,9 @@ func (f *decompressor) dataBlock() { - n := int(f.buf[0]) | int(f.buf[1])<<8 - nn := int(f.buf[2]) | int(f.buf[3])<<8 - if uint16(nn) != uint16(^n) { -+ if debugDecode { -+ fmt.Println(""uint16(nn) != uint16(^n)"", nn, ^n) -+ } - f.err = CorruptInputError(f.roffset) - return - } -@@ -789,6 +843,9 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { - if n == 0 { - f.b = b - f.nb = nb -+ if debugDecode { -+ fmt.Println(""huffsym: n==0"") -+ } - f.err = CorruptInputError(f.roffset) - return 0, f.err - } -diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go -new file mode 100644 -index 0000000000000..20de8f11f4f37 ---- /dev/null -+++ 
b/vendor/github.com/klauspost/compress/flate/level1.go -@@ -0,0 +1,174 @@ -+package flate -+ -+// fastGen maintains the table for matches, -+// and the previous byte block for level 2. -+// This is the generic implementation. -+type fastEncL1 struct { -+ fastGen -+ table [tableSize]tableEntry -+} -+ -+// EncodeL1 uses a similar algorithm to level 1 -+func (e *fastEncL1) Encode(dst *tokens, src []byte) { -+ const ( -+ inputMargin = 12 - 1 -+ minNonLiteralBlockSize = 1 + 1 + inputMargin -+ ) -+ -+ // Protect against e.cur wraparound. -+ for e.cur >= bufferReset { -+ if len(e.hist) == 0 { -+ for i := range e.table[:] { -+ e.table[i] = tableEntry{} -+ } -+ e.cur = maxMatchOffset -+ break -+ } -+ // Shift down everything in the table that isn't already too far away. -+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset -+ for i := range e.table[:] { -+ v := e.table[i].offset -+ if v <= minOff { -+ v = 0 -+ } else { -+ v = v - e.cur + maxMatchOffset -+ } -+ e.table[i].offset = v -+ } -+ e.cur = maxMatchOffset -+ } -+ -+ s := e.addBlock(src) -+ -+ // This check isn't in the Snappy implementation, but there, the caller -+ // instead of the callee handles this case. -+ if len(src) < minNonLiteralBlockSize { -+ // We do not fill the token table. -+ // This will be picked up by caller. -+ dst.n = uint16(len(src)) -+ return -+ } -+ -+ // Override src -+ src = e.hist -+ nextEmit := s -+ -+ // sLimit is when to stop looking for offset/length copies. The inputMargin -+ // lets us use a fast path for emitLiteral in the main loop, while we are -+ // looking for copies. -+ sLimit := int32(len(src) - inputMargin) -+ -+ // nextEmit is where in src the next emitLiteral should start from. -+ cv := load3232(src, s) -+ -+ for { -+ const skipLog = 5 -+ const doEvery = 2 -+ -+ nextS := s -+ var candidate tableEntry -+ for { -+ nextHash := hash(cv) -+ candidate = e.table[nextHash] -+ nextS = s + doEvery + (s-nextEmit)>>skipLog -+ if nextS > sLimit { -+ goto emitRemainder -+ } -+ -+ now := load6432(src, nextS) -+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} -+ nextHash = hash(uint32(now)) -+ -+ offset := s - (candidate.offset - e.cur) -+ if offset < maxMatchOffset && cv == candidate.val { -+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} -+ break -+ } -+ -+ // Do one right away... -+ cv = uint32(now) -+ s = nextS -+ nextS++ -+ candidate = e.table[nextHash] -+ now >>= 8 -+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} -+ -+ offset = s - (candidate.offset - e.cur) -+ if offset < maxMatchOffset && cv == candidate.val { -+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} -+ break -+ } -+ cv = uint32(now) -+ s = nextS -+ } -+ -+ // A 4-byte match has been found. We'll later see if more than 4 bytes -+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -+ // them as literal bytes. -+ for { -+ // Invariant: we have a 4-byte match at s, and no need to emit any -+ // literal bytes prior to s. -+ -+ // Extend the 4-byte match as long as possible. -+ t := candidate.offset - e.cur -+ l := e.matchlenLong(s+4, t+4, src) + 4 -+ -+ // Extend backwards -+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] { -+ s-- -+ t-- -+ l++ -+ } -+ if nextEmit < s { -+ emitLiteral(dst, src[nextEmit:s]) -+ } -+ -+ // Save the match found -+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) -+ s += l -+ nextEmit = s -+ if nextS >= s { -+ s = nextS + 1 -+ } -+ if s >= sLimit { -+ // Index first pair after match end. 
-+ if int(s+l+4) < len(src) { -+ cv := load3232(src, s) -+ e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv} -+ } -+ goto emitRemainder -+ } -+ -+ // We could immediately start working at s now, but to improve -+ // compression we first update the hash table at s-2 and at s. If -+ // another emitCopy is not our next move, also calculate nextHash -+ // at s+1. At least on GOARCH=amd64, these three hash calculations -+ // are faster as one load64 call (with some shifts) instead of -+ // three load32 calls. -+ x := load6432(src, s-2) -+ o := e.cur + s - 2 -+ prevHash := hash(uint32(x)) -+ e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} -+ x >>= 16 -+ currHash := hash(uint32(x)) -+ candidate = e.table[currHash] -+ e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)} -+ -+ offset := s - (candidate.offset - e.cur) -+ if offset > maxMatchOffset || uint32(x) != candidate.val { -+ cv = uint32(x >> 8) -+ s++ -+ break -+ } -+ } -+ } -+ -+emitRemainder: -+ if int(nextEmit) < len(src) { -+ // If nothing was added, don't encode literals. -+ if dst.n == 0 { -+ return -+ } -+ emitLiteral(dst, src[nextEmit:]) -+ } -+} -diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go -new file mode 100644 -index 0000000000000..7c824431e6477 ---- /dev/null -+++ b/vendor/github.com/klauspost/compress/flate/level2.go -@@ -0,0 +1,199 @@ -+package flate -+ -+// fastGen maintains the table for matches, -+// and the previous byte block for level 2. -+// This is the generic implementation. -+type fastEncL2 struct { -+ fastGen -+ table [bTableSize]tableEntry -+} -+ -+// EncodeL2 uses a similar algorithm to level 1, but is capable -+// of matching across blocks giving better compression at a small slowdown. -+func (e *fastEncL2) Encode(dst *tokens, src []byte) { -+ const ( -+ inputMargin = 12 - 1 -+ minNonLiteralBlockSize = 1 + 1 + inputMargin -+ ) -+ -+ // Protect against e.cur wraparound. -+ for e.cur >= bufferReset { -+ if len(e.hist) == 0 { -+ for i := range e.table[:] { -+ e.table[i] = tableEntry{} -+ } -+ e.cur = maxMatchOffset -+ break -+ } -+ // Shift down everything in the table that isn't already too far away. -+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset -+ for i := range e.table[:] { -+ v := e.table[i].offset -+ if v <= minOff { -+ v = 0 -+ } else { -+ v = v - e.cur + maxMatchOffset -+ } -+ e.table[i].offset = v -+ } -+ e.cur = maxMatchOffset -+ } -+ -+ s := e.addBlock(src) -+ -+ // This check isn't in the Snappy implementation, but there, the caller -+ // instead of the callee handles this case. -+ if len(src) < minNonLiteralBlockSize { -+ // We do not fill the token table. -+ // This will be picked up by caller. -+ dst.n = uint16(len(src)) -+ return -+ } -+ -+ // Override src -+ src = e.hist -+ nextEmit := s -+ -+ // sLimit is when to stop looking for offset/length copies. The inputMargin -+ // lets us use a fast path for emitLiteral in the main loop, while we are -+ // looking for copies. -+ sLimit := int32(len(src) - inputMargin) -+ -+ // nextEmit is where in src the next emitLiteral should start from. -+ cv := load3232(src, s) -+ for { -+ // When should we start skipping if we haven't found matches in a long while. 
-+ const skipLog = 5 -+ const doEvery = 2 -+ -+ nextS := s -+ var candidate tableEntry -+ for { -+ nextHash := hash4u(cv, bTableBits) -+ s = nextS -+ nextS = s + doEvery + (s-nextEmit)>>skipLog -+ if nextS > sLimit { -+ goto emitRemainder -+ } -+ candidate = e.table[nextHash] -+ now := load6432(src, nextS) -+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} -+ nextHash = hash4u(uint32(now), bTableBits) -+ -+ offset := s - (candidate.offset - e.cur) -+ if offset < maxMatchOffset && cv == candidate.val { -+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} -+ break -+ } -+ -+ // Do one right away... -+ cv = uint32(now) -+ s = nextS -+ nextS++ -+ candidate = e.table[nextHash] -+ now >>= 8 -+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} -+ -+ offset = s - (candidate.offset - e.cur) -+ if offset < maxMatchOffset && cv == candidate.val { -+ break -+ } -+ cv = uint32(now) -+ } -+ -+ // A 4-byte match has been found. We'll later see if more than 4 bytes -+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -+ // them as literal bytes. -+ -+ // Call emitCopy, and then see if another emitCopy could be our next -+ // move. Repeat until we find no match for the input immediately after -+ // what was consumed by the last emitCopy call. -+ // -+ // If we exit this loop normally then we need to call emitLiteral next, -+ // though we don't yet know how big the literal will be. We handle that -+ // by proceeding to the next iteration of the main loop. We also can -+ // exit this loop via goto if we get close to exhausting the input. -+ for { -+ // Invariant: we have a 4-byte match at s, and no need to emit any -+ // literal bytes prior to s. -+ -+ // Extend the 4-byte match as long as possible. -+ t := candidate.offset - e.cur -+ l := e.matchlenLong(s+4, t+4, src) + 4 -+ -+ // Extend backwards -+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] { -+ s-- -+ t-- -+ l++ -+ } -+ if nextEmit < s { -+ emitLiteral(dst, src[nextEmit:s]) -+ } -+ -+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) -+ s += l -+ nextEmit = s -+ if nextS >= s { -+ s = nextS + 1 -+ } -+ -+ if s >= sLimit { -+ // Index first pair after match end. -+ if int(s+l+4) < len(src) { -+ cv := load3232(src, s) -+ e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv} -+ } -+ goto emitRemainder -+ } -+ -+ // Store every second hash in-between, but offset by 1. -+ for i := s - l + 2; i < s-5; i += 7 { -+ x := load6432(src, int32(i)) -+ nextHash := hash4u(uint32(x), bTableBits) -+ e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)} -+ // Skip one -+ x >>= 16 -+ nextHash = hash4u(uint32(x), bTableBits) -+ e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)} -+ // Skip one -+ x >>= 16 -+ nextHash = hash4u(uint32(x), bTableBits) -+ e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)} -+ } -+ -+ // We could immediately start working at s now, but to improve -+ // compression we first update the hash table at s-2 to s. If -+ // another emitCopy is not our next move, also calculate nextHash -+ // at s+1. At least on GOARCH=amd64, these three hash calculations -+ // are faster as one load64 call (with some shifts) instead of -+ // three load32 calls. 
-+ x := load6432(src, s-2) -+ o := e.cur + s - 2 -+ prevHash := hash4u(uint32(x), bTableBits) -+ prevHash2 := hash4u(uint32(x>>8), bTableBits) -+ e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} -+ e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)} -+ currHash := hash4u(uint32(x>>16), bTableBits) -+ candidate = e.table[currHash] -+ e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)} -+ -+ offset := s - (candidate.offset - e.cur) -+ if offset > maxMatchOffset || uint32(x>>16) != candidate.val { -+ cv = uint32(x >> 24) -+ s++ -+ break -+ } -+ } -+ } -+ -+emitRemainder: -+ if int(nextEmit) < len(src) { -+ // If nothing was added, don't encode literals. -+ if dst.n == 0 { -+ return -+ } -+ -+ emitLiteral(dst, src[nextEmit:]) -+ } -+} -diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go -new file mode 100644 -index 0000000000000..4153d24c95fa4 ---- /dev/null -+++ b/vendor/github.com/klauspost/compress/flate/level3.go -@@ -0,0 +1,225 @@ -+package flate -+ -+// fastEncL3 -+type fastEncL3 struct { -+ fastGen -+ table [tableSize]tableEntryPrev -+} -+ -+// Encode uses a similar algorithm to level 2, will check up to two candidates. -+func (e *fastEncL3) Encode(dst *tokens, src []byte) { -+ const ( -+ inputMargin = 8 - 1 -+ minNonLiteralBlockSize = 1 + 1 + inputMargin -+ ) -+ -+ // Protect against e.cur wraparound. -+ for e.cur >= bufferReset { -+ if len(e.hist) == 0 { -+ for i := range e.table[:] { -+ e.table[i] = tableEntryPrev{} -+ } -+ e.cur = maxMatchOffset -+ break -+ } -+ // Shift down everything in the table that isn't already too far away. -+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset -+ for i := range e.table[:] { -+ v := e.table[i] -+ if v.Cur.offset <= minOff { -+ v.Cur.offset = 0 -+ } else { -+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset -+ } -+ if v.Prev.offset <= minOff { -+ v.Prev.offset = 0 -+ } else { -+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset -+ } -+ e.table[i] = v -+ } -+ e.cur = maxMatchOffset -+ } -+ -+ s := e.addBlock(src) -+ -+ // Skip if too small. -+ if len(src) < minNonLiteralBlockSize { -+ // We do not fill the token table. -+ // This will be picked up by caller. -+ dst.n = uint16(len(src)) -+ return -+ } -+ -+ // Override src -+ src = e.hist -+ nextEmit := s -+ -+ // sLimit is when to stop looking for offset/length copies. The inputMargin -+ // lets us use a fast path for emitLiteral in the main loop, while we are -+ // looking for copies. -+ sLimit := int32(len(src) - inputMargin) -+ -+ // nextEmit is where in src the next emitLiteral should start from. -+ cv := load3232(src, s) -+ for { -+ const skipLog = 6 -+ nextS := s -+ var candidate tableEntry -+ for { -+ nextHash := hash(cv) -+ s = nextS -+ nextS = s + 1 + (s-nextEmit)>>skipLog -+ if nextS > sLimit { -+ goto emitRemainder -+ } -+ candidates := e.table[nextHash] -+ now := load3232(src, nextS) -+ e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} -+ -+ // Check both candidates -+ candidate = candidates.Cur -+ offset := s - (candidate.offset - e.cur) -+ if cv == candidate.val { -+ if offset > maxMatchOffset { -+ cv = now -+ // Previous will also be invalid, we have nothing. -+ continue -+ } -+ o2 := s - (candidates.Prev.offset - e.cur) -+ if cv != candidates.Prev.val || o2 > maxMatchOffset { -+ break -+ } -+ // Both match and are valid, pick longest. 
-+ l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) -+ if l2 > l1 { -+ candidate = candidates.Prev -+ } -+ break -+ } else { -+ // We only check if value mismatches. -+ // Offset will always be invalid in other cases. -+ candidate = candidates.Prev -+ if cv == candidate.val { -+ offset := s - (candidate.offset - e.cur) -+ if offset <= maxMatchOffset { -+ break -+ } -+ } -+ } -+ cv = now -+ } -+ -+ // Call emitCopy, and then see if another emitCopy could be our next -+ // move. Repeat until we find no match for the input immediately after -+ // what was consumed by the last emitCopy call. -+ // -+ // If we exit this loop normally then we need to call emitLiteral next, -+ // though we don't yet know how big the literal will be. We handle that -+ // by proceeding to the next iteration of the main loop. We also can -+ // exit this loop via goto if we get close to exhausting the input. -+ for { -+ // Invariant: we have a 4-byte match at s, and no need to emit any -+ // literal bytes prior to s. -+ -+ // Extend the 4-byte match as long as possible. -+ // -+ t := candidate.offset - e.cur -+ l := e.matchlenLong(s+4, t+4, src) + 4 -+ -+ // Extend backwards -+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] { -+ s-- -+ t-- -+ l++ -+ } -+ if nextEmit < s { -+ emitLiteral(dst, src[nextEmit:s]) -+ } -+ -+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) -+ s += l -+ nextEmit = s -+ if nextS >= s { -+ s = nextS + 1 -+ } -+ -+ if s >= sLimit { -+ t += l -+ // Index first pair after match end. -+ if int(t+4) < len(src) && t > 0 { -+ cv := load3232(src, t) -+ nextHash := hash(cv) -+ e.table[nextHash] = tableEntryPrev{ -+ Prev: e.table[nextHash].Cur, -+ Cur: tableEntry{offset: e.cur + t, val: cv}, -+ } -+ } -+ goto emitRemainder -+ } -+ -+ // We could immediately start working at s now, but to improve -+ // compression we first update the hash table at s-3 to s. -+ x := load6432(src, s-3) -+ prevHash := hash(uint32(x)) -+ e.table[prevHash] = tableEntryPrev{ -+ Prev: e.table[prevHash].Cur, -+ Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, -+ } -+ x >>= 8 -+ prevHash = hash(uint32(x)) -+ -+ e.table[prevHash] = tableEntryPrev{ -+ Prev: e.table[prevHash].Cur, -+ Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, -+ } -+ x >>= 8 -+ prevHash = hash(uint32(x)) -+ -+ e.table[prevHash] = tableEntryPrev{ -+ Prev: e.table[prevHash].Cur, -+ Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, -+ } -+ x >>= 8 -+ currHash := hash(uint32(x)) -+ candidates := e.table[currHash] -+ cv = uint32(x) -+ e.table[currHash] = tableEntryPrev{ -+ Prev: candidates.Cur, -+ Cur: tableEntry{offset: s + e.cur, val: cv}, -+ } -+ -+ // Check both candidates -+ candidate = candidates.Cur -+ if cv == candidate.val { -+ offset := s - (candidate.offset - e.cur) -+ if offset <= maxMatchOffset { -+ continue -+ } -+ } else { -+ // We only check if value mismatches. -+ // Offset will always be invalid in other cases. -+ candidate = candidates.Prev -+ if cv == candidate.val { -+ offset := s - (candidate.offset - e.cur) -+ if offset <= maxMatchOffset { -+ continue -+ } -+ } -+ } -+ cv = uint32(x >> 8) -+ s++ -+ break -+ } -+ } -+ -+emitRemainder: -+ if int(nextEmit) < len(src) { -+ // If nothing was added, don't encode literals. 
-+ if dst.n == 0 { -+ return -+ } -+ -+ emitLiteral(dst, src[nextEmit:]) -+ } -+} -diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go -new file mode 100644 -index 0000000000000..c689ac771b823 ---- /dev/null -+++ b/vendor/github.com/klauspost/compress/flate/level4.go -@@ -0,0 +1,210 @@ -+package flate -+ -+import ""fmt"" -+ -+type fastEncL4 struct { -+ fastGen -+ table [tableSize]tableEntry -+ bTable [tableSize]tableEntry -+} -+ -+func (e *fastEncL4) Encode(dst *tokens, src []byte) { -+ const ( -+ inputMargin = 12 - 1 -+ minNonLiteralBlockSize = 1 + 1 + inputMargin -+ ) -+ -+ // Protect against e.cur wraparound. -+ for e.cur >= bufferReset { -+ if len(e.hist) == 0 { -+ for i := range e.table[:] { -+ e.table[i] = tableEntry{} -+ } -+ for i := range e.bTable[:] { -+ e.bTable[i] = tableEntry{} -+ } -+ e.cur = maxMatchOffset -+ break -+ } -+ // Shift down everything in the table that isn't already too far away. -+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset -+ for i := range e.table[:] { -+ v := e.table[i].offset -+ if v <= minOff { -+ v = 0 -+ } else { -+ v = v - e.cur + maxMatchOffset -+ } -+ e.table[i].offset = v -+ } -+ for i := range e.bTable[:] { -+ v := e.bTable[i].offset -+ if v <= minOff { -+ v = 0 -+ } else { -+ v = v - e.cur + maxMatchOffset -+ } -+ e.bTable[i].offset = v -+ } -+ e.cur = maxMatchOffset -+ } -+ -+ s := e.addBlock(src) -+ -+ // This check isn't in the Snappy implementation, but there, the caller -+ // instead of the callee handles this case. -+ if len(src) < minNonLiteralBlockSize { -+ // We do not fill the token table. -+ // This will be picked up by caller. -+ dst.n = uint16(len(src)) -+ return -+ } -+ -+ // Override src -+ src = e.hist -+ nextEmit := s -+ -+ // sLimit is when to stop looking for offset/length copies. The inputMargin -+ // lets us use a fast path for emitLiteral in the main loop, while we are -+ // looking for copies. -+ sLimit := int32(len(src) - inputMargin) -+ -+ // nextEmit is where in src the next emitLiteral should start from. -+ cv := load6432(src, s) -+ for { -+ const skipLog = 6 -+ const doEvery = 1 -+ -+ nextS := s -+ var t int32 -+ for { -+ nextHashS := hash4x64(cv, tableBits) -+ nextHashL := hash7(cv, tableBits) -+ -+ s = nextS -+ nextS = s + doEvery + (s-nextEmit)>>skipLog -+ if nextS > sLimit { -+ goto emitRemainder -+ } -+ // Fetch a short+long candidate -+ sCandidate := e.table[nextHashS] -+ lCandidate := e.bTable[nextHashL] -+ next := load6432(src, nextS) -+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)} -+ e.table[nextHashS] = entry -+ e.bTable[nextHashL] = entry -+ -+ t = lCandidate.offset - e.cur -+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.val { -+ // We got a long match. Use that. -+ break -+ } -+ -+ t = sCandidate.offset - e.cur -+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { -+ // Found a 4 match... -+ lCandidate = e.bTable[hash7(next, tableBits)] -+ -+ // If the next long is a candidate, check if we should use that instead... -+ lOff := nextS - (lCandidate.offset - e.cur) -+ if lOff < maxMatchOffset && lCandidate.val == uint32(next) { -+ l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) -+ if l2 > l1 { -+ s = nextS -+ t = lCandidate.offset - e.cur -+ } -+ } -+ break -+ } -+ cv = next -+ } -+ -+ // A 4-byte match has been found. We'll later see if more than 4 bytes -+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -+ // them as literal bytes. 
-+ -+ // Extend the 4-byte match as long as possible. -+ l := e.matchlenLong(s+4, t+4, src) + 4 -+ -+ // Extend backwards -+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] { -+ s-- -+ t-- -+ l++ -+ } -+ if nextEmit < s { -+ emitLiteral(dst, src[nextEmit:s]) -+ } -+ if false { -+ if t >= s { -+ panic(""s-t"") -+ } -+ if (s - t) > maxMatchOffset { -+ panic(fmt.Sprintln(""mmo"", t)) -+ } -+ if l < baseMatchLength { -+ panic(""bml"") -+ } -+ } -+ -+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) -+ s += l -+ nextEmit = s -+ if nextS >= s { -+ s = nextS + 1 -+ } -+ -+ if s >= sLimit { -+ // Index first pair after match end. -+ if int(s+8) < len(src) { -+ cv := load6432(src, s) -+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} -+ e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} -+ } -+ goto emitRemainder -+ } -+ -+ // Store every 3rd hash in-between -+ if true { -+ i := nextS -+ if i < s-1 { -+ cv := load6432(src, i) -+ t := tableEntry{offset: i + e.cur, val: uint32(cv)} -+ t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} -+ e.bTable[hash7(cv, tableBits)] = t -+ e.bTable[hash7(cv>>8, tableBits)] = t2 -+ e.table[hash4u(t2.val, tableBits)] = t2 -+ -+ i += 3 -+ for ; i < s-1; i += 3 { -+ cv := load6432(src, i) -+ t := tableEntry{offset: i + e.cur, val: uint32(cv)} -+ t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} -+ e.bTable[hash7(cv, tableBits)] = t -+ e.bTable[hash7(cv>>8, tableBits)] = t2 -+ e.table[hash4u(t2.val, tableBits)] = t2 -+ } -+ } -+ } -+ -+ // We could immediately start working at s now, but to improve -+ // compression we first update the hash table at s-1 and at s. -+ x := load6432(src, s-1) -+ o := e.cur + s - 1 -+ prevHashS := hash4x64(x, tableBits) -+ prevHashL := hash7(x, tableBits) -+ e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} -+ e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)} -+ cv = x >> 8 -+ } -+ -+emitRemainder: -+ if int(nextEmit) < len(src) { -+ // If nothing was added, don't encode literals. -+ if dst.n == 0 { -+ return -+ } -+ -+ emitLiteral(dst, src[nextEmit:]) -+ } -+} -diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go -new file mode 100644 -index 0000000000000..14a2356126aad ---- /dev/null -+++ b/vendor/github.com/klauspost/compress/flate/level5.go -@@ -0,0 +1,276 @@ -+package flate -+ -+import ""fmt"" -+ -+type fastEncL5 struct { -+ fastGen -+ table [tableSize]tableEntry -+ bTable [tableSize]tableEntryPrev -+} -+ -+func (e *fastEncL5) Encode(dst *tokens, src []byte) { -+ const ( -+ inputMargin = 12 - 1 -+ minNonLiteralBlockSize = 1 + 1 + inputMargin -+ ) -+ -+ // Protect against e.cur wraparound. -+ for e.cur >= bufferReset { -+ if len(e.hist) == 0 { -+ for i := range e.table[:] { -+ e.table[i] = tableEntry{} -+ } -+ for i := range e.bTable[:] { -+ e.bTable[i] = tableEntryPrev{} -+ } -+ e.cur = maxMatchOffset -+ break -+ } -+ // Shift down everything in the table that isn't already too far away. 
-+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset -+ for i := range e.table[:] { -+ v := e.table[i].offset -+ if v <= minOff { -+ v = 0 -+ } else { -+ v = v - e.cur + maxMatchOffset -+ } -+ e.table[i].offset = v -+ } -+ for i := range e.bTable[:] { -+ v := e.bTable[i] -+ if v.Cur.offset <= minOff { -+ v.Cur.offset = 0 -+ v.Prev.offset = 0 -+ } else { -+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset -+ if v.Prev.offset <= minOff { -+ v.Prev.offset = 0 -+ } else { -+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset -+ } -+ } -+ e.bTable[i] = v -+ } -+ e.cur = maxMatchOffset -+ } -+ -+ s := e.addBlock(src) -+ -+ // This check isn't in the Snappy implementation, but there, the caller -+ // instead of the callee handles this case. -+ if len(src) < minNonLiteralBlockSize { -+ // We do not fill the token table. -+ // This will be picked up by caller. -+ dst.n = uint16(len(src)) -+ return -+ } -+ -+ // Override src -+ src = e.hist -+ nextEmit := s -+ -+ // sLimit is when to stop looking for offset/length copies. The inputMargin -+ // lets us use a fast path for emitLiteral in the main loop, while we are -+ // looking for copies. -+ sLimit := int32(len(src) - inputMargin) -+ -+ // nextEmit is where in src the next emitLiteral should start from. -+ cv := load6432(src, s) -+ for { -+ const skipLog = 6 -+ const doEvery = 1 -+ -+ nextS := s -+ var l int32 -+ var t int32 -+ for { -+ nextHashS := hash4x64(cv, tableBits) -+ nextHashL := hash7(cv, tableBits) -+ -+ s = nextS -+ nextS = s + doEvery + (s-nextEmit)>>skipLog -+ if nextS > sLimit { -+ goto emitRemainder -+ } -+ // Fetch a short+long candidate -+ sCandidate := e.table[nextHashS] -+ lCandidate := e.bTable[nextHashL] -+ next := load6432(src, nextS) -+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)} -+ e.table[nextHashS] = entry -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = entry, eLong.Cur -+ -+ nextHashS = hash4x64(next, tableBits) -+ nextHashL = hash7(next, tableBits) -+ -+ t = lCandidate.Cur.offset - e.cur -+ if s-t < maxMatchOffset { -+ if uint32(cv) == lCandidate.Cur.val { -+ // Store the next match -+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur -+ -+ t2 := lCandidate.Prev.offset - e.cur -+ if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { -+ l = e.matchlen(s+4, t+4, src) + 4 -+ ml1 := e.matchlen(s+4, t2+4, src) + 4 -+ if ml1 > l { -+ t = t2 -+ l = ml1 -+ break -+ } -+ } -+ break -+ } -+ t = lCandidate.Prev.offset - e.cur -+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { -+ // Store the next match -+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur -+ break -+ } -+ } -+ -+ t = sCandidate.offset - e.cur -+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { -+ // Found a 4 match... -+ l = e.matchlen(s+4, t+4, src) + 4 -+ lCandidate = e.bTable[nextHashL] -+ // Store the next match -+ -+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur -+ -+ // If the next long is a candidate, use that... 
-+ t2 := lCandidate.Cur.offset - e.cur -+ if nextS-t2 < maxMatchOffset { -+ if lCandidate.Cur.val == uint32(next) { -+ ml := e.matchlen(nextS+4, t2+4, src) + 4 -+ if ml > l { -+ t = t2 -+ s = nextS -+ l = ml -+ break -+ } -+ } -+ // If the previous long is a candidate, use that... -+ t2 = lCandidate.Prev.offset - e.cur -+ if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { -+ ml := e.matchlen(nextS+4, t2+4, src) + 4 -+ if ml > l { -+ t = t2 -+ s = nextS -+ l = ml -+ break -+ } -+ } -+ } -+ break -+ } -+ cv = next -+ } -+ -+ // A 4-byte match has been found. We'll later see if more than 4 bytes -+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -+ // them as literal bytes. -+ -+ // Extend the 4-byte match as long as possible. -+ if l == 0 { -+ l = e.matchlenLong(s+4, t+4, src) + 4 -+ } else if l == maxMatchLength { -+ l += e.matchlenLong(s+l, t+l, src) -+ } -+ // Extend backwards -+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] { -+ s-- -+ t-- -+ l++ -+ } -+ if nextEmit < s { -+ emitLiteral(dst, src[nextEmit:s]) -+ } -+ if false { -+ if t >= s { -+ panic(fmt.Sprintln(""s-t"", s, t)) -+ } -+ if (s - t) > maxMatchOffset { -+ panic(fmt.Sprintln(""mmo"", s-t)) -+ } -+ if l < baseMatchLength { -+ panic(""bml"") -+ } -+ } -+ -+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) -+ s += l -+ nextEmit = s -+ if nextS >= s { -+ s = nextS + 1 -+ } -+ -+ if s >= sLimit { -+ goto emitRemainder -+ } -+ -+ // Store every 3rd hash in-between. -+ if true { -+ const hashEvery = 3 -+ i := s - l + 1 -+ if i < s-1 { -+ cv := load6432(src, i) -+ t := tableEntry{offset: i + e.cur, val: uint32(cv)} -+ e.table[hash4x64(cv, tableBits)] = t -+ eLong := &e.bTable[hash7(cv, tableBits)] -+ eLong.Cur, eLong.Prev = t, eLong.Cur -+ -+ // Do an long at i+1 -+ cv >>= 8 -+ t = tableEntry{offset: t.offset + 1, val: uint32(cv)} -+ eLong = &e.bTable[hash7(cv, tableBits)] -+ eLong.Cur, eLong.Prev = t, eLong.Cur -+ -+ // We only have enough bits for a short entry at i+2 -+ cv >>= 8 -+ t = tableEntry{offset: t.offset + 1, val: uint32(cv)} -+ e.table[hash4x64(cv, tableBits)] = t -+ -+ // Skip one - otherwise we risk hitting 's' -+ i += 4 -+ for ; i < s-1; i += hashEvery { -+ cv := load6432(src, i) -+ t := tableEntry{offset: i + e.cur, val: uint32(cv)} -+ t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} -+ eLong := &e.bTable[hash7(cv, tableBits)] -+ eLong.Cur, eLong.Prev = t, eLong.Cur -+ e.table[hash4u(t2.val, tableBits)] = t2 -+ } -+ } -+ } -+ -+ // We could immediately start working at s now, but to improve -+ // compression we first update the hash table at s-1 and at s. -+ x := load6432(src, s-1) -+ o := e.cur + s - 1 -+ prevHashS := hash4x64(x, tableBits) -+ prevHashL := hash7(x, tableBits) -+ e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} -+ eLong := &e.bTable[prevHashL] -+ eLong.Cur, eLong.Prev = tableEntry{offset: o, val: uint32(x)}, eLong.Cur -+ cv = x >> 8 -+ } -+ -+emitRemainder: -+ if int(nextEmit) < len(src) { -+ // If nothing was added, don't encode literals. 
-+ if dst.n == 0 { -+ return -+ } -+ -+ emitLiteral(dst, src[nextEmit:]) -+ } -+} -diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go -new file mode 100644 -index 0000000000000..cad0c7df7fc3c ---- /dev/null -+++ b/vendor/github.com/klauspost/compress/flate/level6.go -@@ -0,0 +1,279 @@ -+package flate -+ -+import ""fmt"" -+ -+type fastEncL6 struct { -+ fastGen -+ table [tableSize]tableEntry -+ bTable [tableSize]tableEntryPrev -+} -+ -+func (e *fastEncL6) Encode(dst *tokens, src []byte) { -+ const ( -+ inputMargin = 12 - 1 -+ minNonLiteralBlockSize = 1 + 1 + inputMargin -+ ) -+ -+ // Protect against e.cur wraparound. -+ for e.cur >= bufferReset { -+ if len(e.hist) == 0 { -+ for i := range e.table[:] { -+ e.table[i] = tableEntry{} -+ } -+ for i := range e.bTable[:] { -+ e.bTable[i] = tableEntryPrev{} -+ } -+ e.cur = maxMatchOffset -+ break -+ } -+ // Shift down everything in the table that isn't already too far away. -+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset -+ for i := range e.table[:] { -+ v := e.table[i].offset -+ if v <= minOff { -+ v = 0 -+ } else { -+ v = v - e.cur + maxMatchOffset -+ } -+ e.table[i].offset = v -+ } -+ for i := range e.bTable[:] { -+ v := e.bTable[i] -+ if v.Cur.offset <= minOff { -+ v.Cur.offset = 0 -+ v.Prev.offset = 0 -+ } else { -+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset -+ if v.Prev.offset <= minOff { -+ v.Prev.offset = 0 -+ } else { -+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset -+ } -+ } -+ e.bTable[i] = v -+ } -+ e.cur = maxMatchOffset -+ } -+ -+ s := e.addBlock(src) -+ -+ // This check isn't in the Snappy implementation, but there, the caller -+ // instead of the callee handles this case. -+ if len(src) < minNonLiteralBlockSize { -+ // We do not fill the token table. -+ // This will be picked up by caller. -+ dst.n = uint16(len(src)) -+ return -+ } -+ -+ // Override src -+ src = e.hist -+ nextEmit := s -+ -+ // sLimit is when to stop looking for offset/length copies. The inputMargin -+ // lets us use a fast path for emitLiteral in the main loop, while we are -+ // looking for copies. -+ sLimit := int32(len(src) - inputMargin) -+ -+ // nextEmit is where in src the next emitLiteral should start from. -+ cv := load6432(src, s) -+ // Repeat MUST be > 1 and within range -+ repeat := int32(1) -+ for { -+ const skipLog = 7 -+ const doEvery = 1 -+ -+ nextS := s -+ var l int32 -+ var t int32 -+ for { -+ nextHashS := hash4x64(cv, tableBits) -+ nextHashL := hash7(cv, tableBits) -+ s = nextS -+ nextS = s + doEvery + (s-nextEmit)>>skipLog -+ if nextS > sLimit { -+ goto emitRemainder -+ } -+ // Fetch a short+long candidate -+ sCandidate := e.table[nextHashS] -+ lCandidate := e.bTable[nextHashL] -+ next := load6432(src, nextS) -+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)} -+ e.table[nextHashS] = entry -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = entry, eLong.Cur -+ -+ // Calculate hashes of 'next' -+ nextHashS = hash4x64(next, tableBits) -+ nextHashL = hash7(next, tableBits) -+ -+ t = lCandidate.Cur.offset - e.cur -+ if s-t < maxMatchOffset { -+ if uint32(cv) == lCandidate.Cur.val { -+ // Long candidate matches at least 4 bytes. -+ -+ // Store the next match -+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur -+ -+ // Check the previous long candidate as well. 
-+ t2 := lCandidate.Prev.offset - e.cur -+ if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { -+ l = e.matchlen(s+4, t+4, src) + 4 -+ ml1 := e.matchlen(s+4, t2+4, src) + 4 -+ if ml1 > l { -+ t = t2 -+ l = ml1 -+ break -+ } -+ } -+ break -+ } -+ // Current value did not match, but check if previous long value does. -+ t = lCandidate.Prev.offset - e.cur -+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { -+ // Store the next match -+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur -+ break -+ } -+ } -+ -+ t = sCandidate.offset - e.cur -+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { -+ // Found a 4 match... -+ l = e.matchlen(s+4, t+4, src) + 4 -+ -+ // Look up next long candidate (at nextS) -+ lCandidate = e.bTable[nextHashL] -+ -+ // Store the next match -+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} -+ eLong := &e.bTable[nextHashL] -+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur -+ -+ // Check repeat at s + repOff -+ const repOff = 1 -+ t2 := s - repeat + repOff -+ if load3232(src, t2) == uint32(cv>>(8*repOff)) { -+ ml := e.matchlen(s+4+repOff, t2+4, src) + 4 -+ if ml > l { -+ t = t2 -+ l = ml -+ s += repOff -+ // Not worth checking more. -+ break -+ } -+ } -+ -+ // If the next long is a candidate, use that... -+ t2 = lCandidate.Cur.offset - e.cur -+ if nextS-t2 < maxMatchOffset { -+ if lCandidate.Cur.val == uint32(next) { -+ ml := e.matchlen(nextS+4, t2+4, src) + 4 -+ if ml > l { -+ t = t2 -+ s = nextS -+ l = ml -+ // This is ok, but check previous as well. -+ } -+ } -+ // If the previous long is a candidate, use that... -+ t2 = lCandidate.Prev.offset - e.cur -+ if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { -+ ml := e.matchlen(nextS+4, t2+4, src) + 4 -+ if ml > l { -+ t = t2 -+ s = nextS -+ l = ml -+ break -+ } -+ } -+ } -+ break -+ } -+ cv = next -+ } -+ -+ // A 4-byte match has been found. We'll later see if more than 4 bytes -+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -+ // them as literal bytes. -+ -+ // Extend the 4-byte match as long as possible. -+ if l == 0 { -+ l = e.matchlenLong(s+4, t+4, src) + 4 -+ } else if l == maxMatchLength { -+ l += e.matchlenLong(s+l, t+l, src) -+ } -+ -+ // Extend backwards -+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] { -+ s-- -+ t-- -+ l++ -+ } -+ if nextEmit < s { -+ emitLiteral(dst, src[nextEmit:s]) -+ } -+ if false { -+ if t >= s { -+ panic(fmt.Sprintln(""s-t"", s, t)) -+ } -+ if (s - t) > maxMatchOffset { -+ panic(fmt.Sprintln(""mmo"", s-t)) -+ } -+ if l < baseMatchLength { -+ panic(""bml"") -+ } -+ } -+ -+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) -+ repeat = s - t -+ s += l -+ nextEmit = s -+ if nextS >= s { -+ s = nextS + 1 -+ } -+ -+ if s >= sLimit { -+ // Index after match end. -+ for i := nextS + 1; i < int32(len(src))-8; i += 2 { -+ cv := load6432(src, i) -+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur, val: uint32(cv)} -+ eLong := &e.bTable[hash7(cv, tableBits)] -+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur, val: uint32(cv)}, eLong.Cur -+ } -+ goto emitRemainder -+ } -+ -+ // Store every long hash in-between and every second short. 
-+ if true { -+ for i := nextS + 1; i < s-1; i += 2 { -+ cv := load6432(src, i) -+ t := tableEntry{offset: i + e.cur, val: uint32(cv)} -+ t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} -+ eLong := &e.bTable[hash7(cv, tableBits)] -+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)] -+ e.table[hash4x64(cv, tableBits)] = t -+ eLong.Cur, eLong.Prev = t, eLong.Cur -+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur -+ } -+ } -+ -+ // We could immediately start working at s now, but to improve -+ // compression we first update the hash table at s-1 and at s. -+ cv = load6432(src, s) -+ } -+ -+emitRemainder: -+ if int(nextEmit) < len(src) { -+ // If nothing was added, don't encode literals. -+ if dst.n == 0 { -+ return -+ } -+ -+ emitLiteral(dst, src[nextEmit:]) -+ } -+} -diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go -deleted file mode 100644 -index c1a02720d1a9b..0000000000000 ---- a/vendor/github.com/klauspost/compress/flate/reverse_bits.go -+++ /dev/null -@@ -1,48 +0,0 @@ --// Copyright 2009 The Go Authors. All rights reserved. --// Use of this source code is governed by a BSD-style --// license that can be found in the LICENSE file. -- --package flate -- --var reverseByte = [256]byte{ -- 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, -- 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, -- 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, -- 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, -- 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, -- 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, -- 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, -- 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, -- 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, -- 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, -- 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, -- 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, -- 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, -- 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, -- 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, -- 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, -- 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, -- 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, -- 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, -- 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, -- 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, -- 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, -- 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, -- 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, -- 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, -- 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, -- 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, -- 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, -- 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, -- 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, -- 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, -- 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, --} -- --func reverseUint16(v uint16) uint16 { -- return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 --} -- --func reverseBits(number uint16, bitLength byte) uint16 { -- return reverseUint16(number << uint8(16-bitLength)) --} -diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go -deleted file mode 100644 -index aebebd5248f91..0000000000000 ---- a/vendor/github.com/klauspost/compress/flate/snappy.go -+++ /dev/null -@@ -1,900 +0,0 @@ --// Copyright 2011 The Snappy-Go Authors. All rights reserved. --// Modified for deflate by Klaus Post (c) 2015. 
--// Use of this source code is governed by a BSD-style --// license that can be found in the LICENSE file. -- --package flate -- --// emitLiteral writes a literal chunk and returns the number of bytes written. --func emitLiteral(dst *tokens, lit []byte) { -- ol := int(dst.n) -- for i, v := range lit { -- dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) -- } -- dst.n += uint16(len(lit)) --} -- --// emitCopy writes a copy chunk and returns the number of bytes written. --func emitCopy(dst *tokens, offset, length int) { -- dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) -- dst.n++ --} -- --type fastEnc interface { -- Encode(dst *tokens, src []byte) -- Reset() --} -- --func newFastEnc(level int) fastEnc { -- switch level { -- case 1: -- return &snappyL1{} -- case 2: -- return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} -- case 3: -- return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} -- case 4: -- return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} -- default: -- panic(""invalid level specified"") -- } --} -- --const ( -- tableBits = 14 // Bits used in the table -- tableSize = 1 << tableBits // Size of the table -- tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. -- tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. -- baseMatchOffset = 1 // The smallest match offset -- baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 -- maxMatchOffset = 1 << 15 // The largest match offset --) -- --func load32(b []byte, i int) uint32 { -- b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. -- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 --} -- --func load64(b []byte, i int) uint64 { -- b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. -- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | -- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 --} -- --func hash(u uint32) uint32 { -- return (u * 0x1e35a7bd) >> tableShift --} -- --// snappyL1 encapsulates level 1 compression --type snappyL1 struct{} -- --func (e *snappyL1) Reset() {} -- --func (e *snappyL1) Encode(dst *tokens, src []byte) { -- const ( -- inputMargin = 16 - 1 -- minNonLiteralBlockSize = 1 + 1 + inputMargin -- ) -- -- // This check isn't in the Snappy implementation, but there, the caller -- // instead of the callee handles this case. -- if len(src) < minNonLiteralBlockSize { -- // We do not fill the token table. -- // This will be picked up by caller. -- dst.n = uint16(len(src)) -- return -- } -- -- // Initialize the hash table. -- // -- // The table element type is uint16, as s < sLimit and sLimit < len(src) -- // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535. -- var table [tableSize]uint16 -- -- // sLimit is when to stop looking for offset/length copies. The inputMargin -- // lets us use a fast path for emitLiteral in the main loop, while we are -- // looking for copies. -- sLimit := len(src) - inputMargin -- -- // nextEmit is where in src the next emitLiteral should start from. -- nextEmit := 0 -- -- // The encoded form must start with a literal, as there are no previous -- // bytes to copy, so we start looking for hash matches at s == 1. 
-- s := 1 -- nextHash := hash(load32(src, s)) -- -- for { -- // Copied from the C++ snappy implementation: -- // -- // Heuristic match skipping: If 32 bytes are scanned with no matches -- // found, start looking only at every other byte. If 32 more bytes are -- // scanned (or skipped), look at every third byte, etc.. When a match -- // is found, immediately go back to looking at every byte. This is a -- // small loss (~5% performance, ~0.1% density) for compressible data -- // due to more bookkeeping, but for non-compressible data (such as -- // JPEG) it's a huge win since the compressor quickly ""realizes"" the -- // data is incompressible and doesn't bother looking for matches -- // everywhere. -- // -- // The ""skip"" variable keeps track of how many bytes there are since -- // the last match; dividing it by 32 (ie. right-shifting by five) gives -- // the number of bytes to move ahead for each iteration. -- skip := 32 -- -- nextS := s -- candidate := 0 -- for { -- s = nextS -- bytesBetweenHashLookups := skip >> 5 -- nextS = s + bytesBetweenHashLookups -- skip += bytesBetweenHashLookups -- if nextS > sLimit { -- goto emitRemainder -- } -- candidate = int(table[nextHash&tableMask]) -- table[nextHash&tableMask] = uint16(s) -- nextHash = hash(load32(src, nextS)) -- if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) { -- break -- } -- } -- -- // A 4-byte match has been found. We'll later see if more than 4 bytes -- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -- // them as literal bytes. -- emitLiteral(dst, src[nextEmit:s]) -- -- // Call emitCopy, and then see if another emitCopy could be our next -- // move. Repeat until we find no match for the input immediately after -- // what was consumed by the last emitCopy call. -- // -- // If we exit this loop normally then we need to call emitLiteral next, -- // though we don't yet know how big the literal will be. We handle that -- // by proceeding to the next iteration of the main loop. We also can -- // exit this loop via goto if we get close to exhausting the input. -- for { -- // Invariant: we have a 4-byte match at s, and no need to emit any -- // literal bytes prior to s. -- base := s -- -- // Extend the 4-byte match as long as possible. -- // -- // This is an inlined version of Snappy's: -- // s = extendMatch(src, candidate+4, s+4) -- s += 4 -- s1 := base + maxMatchLength -- if s1 > len(src) { -- s1 = len(src) -- } -- a := src[s:s1] -- b := src[candidate+4:] -- b = b[:len(a)] -- l := len(a) -- for i := range a { -- if a[i] != b[i] { -- l = i -- break -- } -- } -- s += l -- -- // matchToken is flate's equivalent of Snappy's emitCopy. -- dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)) -- dst.n++ -- nextEmit = s -- if s >= sLimit { -- goto emitRemainder -- } -- -- // We could immediately start working at s now, but to improve -- // compression we first update the hash table at s-1 and at s. If -- // another emitCopy is not our next move, also calculate nextHash -- // at s+1. At least on GOARCH=amd64, these three hash calculations -- // are faster as one load64 call (with some shifts) instead of -- // three load32 calls. 
-- x := load64(src, s-1) -- prevHash := hash(uint32(x >> 0)) -- table[prevHash&tableMask] = uint16(s - 1) -- currHash := hash(uint32(x >> 8)) -- candidate = int(table[currHash&tableMask]) -- table[currHash&tableMask] = uint16(s) -- if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) { -- nextHash = hash(uint32(x >> 16)) -- s++ -- break -- } -- } -- } -- --emitRemainder: -- if nextEmit < len(src) { -- emitLiteral(dst, src[nextEmit:]) -- } --} -- --type tableEntry struct { -- val uint32 -- offset int32 --} -- --func load3232(b []byte, i int32) uint32 { -- b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. -- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 --} -- --func load6432(b []byte, i int32) uint64 { -- b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. -- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | -- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 --} -- --// snappyGen maintains the table for matches, --// and the previous byte block for level 2. --// This is the generic implementation. --type snappyGen struct { -- prev []byte -- cur int32 --} -- --// snappyGen maintains the table for matches, --// and the previous byte block for level 2. --// This is the generic implementation. --type snappyL2 struct { -- snappyGen -- table [tableSize]tableEntry --} -- --// EncodeL2 uses a similar algorithm to level 1, but is capable --// of matching across blocks giving better compression at a small slowdown. --func (e *snappyL2) Encode(dst *tokens, src []byte) { -- const ( -- inputMargin = 8 - 1 -- minNonLiteralBlockSize = 1 + 1 + inputMargin -- ) -- -- // Protect against e.cur wraparound. -- if e.cur > 1<<30 { -- for i := range e.table[:] { -- e.table[i] = tableEntry{} -- } -- e.cur = maxStoreBlockSize -- } -- -- // This check isn't in the Snappy implementation, but there, the caller -- // instead of the callee handles this case. -- if len(src) < minNonLiteralBlockSize { -- // We do not fill the token table. -- // This will be picked up by caller. -- dst.n = uint16(len(src)) -- e.cur += maxStoreBlockSize -- e.prev = e.prev[:0] -- return -- } -- -- // sLimit is when to stop looking for offset/length copies. The inputMargin -- // lets us use a fast path for emitLiteral in the main loop, while we are -- // looking for copies. -- sLimit := int32(len(src) - inputMargin) -- -- // nextEmit is where in src the next emitLiteral should start from. -- nextEmit := int32(0) -- s := int32(0) -- cv := load3232(src, s) -- nextHash := hash(cv) -- -- for { -- // Copied from the C++ snappy implementation: -- // -- // Heuristic match skipping: If 32 bytes are scanned with no matches -- // found, start looking only at every other byte. If 32 more bytes are -- // scanned (or skipped), look at every third byte, etc.. When a match -- // is found, immediately go back to looking at every byte. This is a -- // small loss (~5% performance, ~0.1% density) for compressible data -- // due to more bookkeeping, but for non-compressible data (such as -- // JPEG) it's a huge win since the compressor quickly ""realizes"" the -- // data is incompressible and doesn't bother looking for matches -- // everywhere. -- // -- // The ""skip"" variable keeps track of how many bytes there are since -- // the last match; dividing it by 32 (ie. right-shifting by five) gives -- // the number of bytes to move ahead for each iteration. 
-- skip := int32(32) -- -- nextS := s -- var candidate tableEntry -- for { -- s = nextS -- bytesBetweenHashLookups := skip >> 5 -- nextS = s + bytesBetweenHashLookups -- skip += bytesBetweenHashLookups -- if nextS > sLimit { -- goto emitRemainder -- } -- candidate = e.table[nextHash&tableMask] -- now := load3232(src, nextS) -- e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv} -- nextHash = hash(now) -- -- offset := s - (candidate.offset - e.cur) -- if offset > maxMatchOffset || cv != candidate.val { -- // Out of range or not matched. -- cv = now -- continue -- } -- break -- } -- -- // A 4-byte match has been found. We'll later see if more than 4 bytes -- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -- // them as literal bytes. -- emitLiteral(dst, src[nextEmit:s]) -- -- // Call emitCopy, and then see if another emitCopy could be our next -- // move. Repeat until we find no match for the input immediately after -- // what was consumed by the last emitCopy call. -- // -- // If we exit this loop normally then we need to call emitLiteral next, -- // though we don't yet know how big the literal will be. We handle that -- // by proceeding to the next iteration of the main loop. We also can -- // exit this loop via goto if we get close to exhausting the input. -- for { -- // Invariant: we have a 4-byte match at s, and no need to emit any -- // literal bytes prior to s. -- -- // Extend the 4-byte match as long as possible. -- // -- s += 4 -- t := candidate.offset - e.cur + 4 -- l := e.matchlen(s, t, src) -- -- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) -- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) -- dst.n++ -- s += l -- nextEmit = s -- if s >= sLimit { -- t += l -- // Index first pair after match end. -- if int(t+4) < len(src) && t > 0 { -- cv := load3232(src, t) -- e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv} -- } -- goto emitRemainder -- } -- -- // We could immediately start working at s now, but to improve -- // compression we first update the hash table at s-1 and at s. If -- // another emitCopy is not our next move, also calculate nextHash -- // at s+1. At least on GOARCH=amd64, these three hash calculations -- // are faster as one load64 call (with some shifts) instead of -- // three load32 calls. -- x := load6432(src, s-1) -- prevHash := hash(uint32(x)) -- e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)} -- x >>= 8 -- currHash := hash(uint32(x)) -- candidate = e.table[currHash&tableMask] -- e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)} -- -- offset := s - (candidate.offset - e.cur) -- if offset > maxMatchOffset || uint32(x) != candidate.val { -- cv = uint32(x >> 8) -- nextHash = hash(cv) -- s++ -- break -- } -- } -- } -- --emitRemainder: -- if int(nextEmit) < len(src) { -- emitLiteral(dst, src[nextEmit:]) -- } -- e.cur += int32(len(src)) -- e.prev = e.prev[:len(src)] -- copy(e.prev, src) --} -- --type tableEntryPrev struct { -- Cur tableEntry -- Prev tableEntry --} -- --// snappyL3 --type snappyL3 struct { -- snappyGen -- table [tableSize]tableEntryPrev --} -- --// Encode uses a similar algorithm to level 2, will check up to two candidates. --func (e *snappyL3) Encode(dst *tokens, src []byte) { -- const ( -- inputMargin = 8 - 1 -- minNonLiteralBlockSize = 1 + 1 + inputMargin -- ) -- -- // Protect against e.cur wraparound. 
-- if e.cur > 1<<30 { -- for i := range e.table[:] { -- e.table[i] = tableEntryPrev{} -- } -- e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} -- } -- -- // This check isn't in the Snappy implementation, but there, the caller -- // instead of the callee handles this case. -- if len(src) < minNonLiteralBlockSize { -- // We do not fill the token table. -- // This will be picked up by caller. -- dst.n = uint16(len(src)) -- e.cur += maxStoreBlockSize -- e.prev = e.prev[:0] -- return -- } -- -- // sLimit is when to stop looking for offset/length copies. The inputMargin -- // lets us use a fast path for emitLiteral in the main loop, while we are -- // looking for copies. -- sLimit := int32(len(src) - inputMargin) -- -- // nextEmit is where in src the next emitLiteral should start from. -- nextEmit := int32(0) -- s := int32(0) -- cv := load3232(src, s) -- nextHash := hash(cv) -- -- for { -- // Copied from the C++ snappy implementation: -- // -- // Heuristic match skipping: If 32 bytes are scanned with no matches -- // found, start looking only at every other byte. If 32 more bytes are -- // scanned (or skipped), look at every third byte, etc.. When a match -- // is found, immediately go back to looking at every byte. This is a -- // small loss (~5% performance, ~0.1% density) for compressible data -- // due to more bookkeeping, but for non-compressible data (such as -- // JPEG) it's a huge win since the compressor quickly ""realizes"" the -- // data is incompressible and doesn't bother looking for matches -- // everywhere. -- // -- // The ""skip"" variable keeps track of how many bytes there are since -- // the last match; dividing it by 32 (ie. right-shifting by five) gives -- // the number of bytes to move ahead for each iteration. -- skip := int32(32) -- -- nextS := s -- var candidate tableEntry -- for { -- s = nextS -- bytesBetweenHashLookups := skip >> 5 -- nextS = s + bytesBetweenHashLookups -- skip += bytesBetweenHashLookups -- if nextS > sLimit { -- goto emitRemainder -- } -- candidates := e.table[nextHash&tableMask] -- now := load3232(src, nextS) -- e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} -- nextHash = hash(now) -- -- // Check both candidates -- candidate = candidates.Cur -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset <= maxMatchOffset { -- break -- } -- } else { -- // We only check if value mismatches. -- // Offset will always be invalid in other cases. -- candidate = candidates.Prev -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset <= maxMatchOffset { -- break -- } -- } -- } -- cv = now -- } -- -- // A 4-byte match has been found. We'll later see if more than 4 bytes -- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -- // them as literal bytes. -- emitLiteral(dst, src[nextEmit:s]) -- -- // Call emitCopy, and then see if another emitCopy could be our next -- // move. Repeat until we find no match for the input immediately after -- // what was consumed by the last emitCopy call. -- // -- // If we exit this loop normally then we need to call emitLiteral next, -- // though we don't yet know how big the literal will be. We handle that -- // by proceeding to the next iteration of the main loop. We also can -- // exit this loop via goto if we get close to exhausting the input. -- for { -- // Invariant: we have a 4-byte match at s, and no need to emit any -- // literal bytes prior to s. 
-- -- // Extend the 4-byte match as long as possible. -- // -- s += 4 -- t := candidate.offset - e.cur + 4 -- l := e.matchlen(s, t, src) -- -- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) -- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) -- dst.n++ -- s += l -- nextEmit = s -- if s >= sLimit { -- t += l -- // Index first pair after match end. -- if int(t+4) < len(src) && t > 0 { -- cv := load3232(src, t) -- nextHash = hash(cv) -- e.table[nextHash&tableMask] = tableEntryPrev{ -- Prev: e.table[nextHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + t, val: cv}, -- } -- } -- goto emitRemainder -- } -- -- // We could immediately start working at s now, but to improve -- // compression we first update the hash table at s-3 to s. If -- // another emitCopy is not our next move, also calculate nextHash -- // at s+1. At least on GOARCH=amd64, these three hash calculations -- // are faster as one load64 call (with some shifts) instead of -- // three load32 calls. -- x := load6432(src, s-3) -- prevHash := hash(uint32(x)) -- e.table[prevHash&tableMask] = tableEntryPrev{ -- Prev: e.table[prevHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, -- } -- x >>= 8 -- prevHash = hash(uint32(x)) -- -- e.table[prevHash&tableMask] = tableEntryPrev{ -- Prev: e.table[prevHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, -- } -- x >>= 8 -- prevHash = hash(uint32(x)) -- -- e.table[prevHash&tableMask] = tableEntryPrev{ -- Prev: e.table[prevHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, -- } -- x >>= 8 -- currHash := hash(uint32(x)) -- candidates := e.table[currHash&tableMask] -- cv = uint32(x) -- e.table[currHash&tableMask] = tableEntryPrev{ -- Prev: candidates.Cur, -- Cur: tableEntry{offset: s + e.cur, val: cv}, -- } -- -- // Check both candidates -- candidate = candidates.Cur -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset <= maxMatchOffset { -- continue -- } -- } else { -- // We only check if value mismatches. -- // Offset will always be invalid in other cases. -- candidate = candidates.Prev -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset <= maxMatchOffset { -- continue -- } -- } -- } -- cv = uint32(x >> 8) -- nextHash = hash(cv) -- s++ -- break -- } -- } -- --emitRemainder: -- if int(nextEmit) < len(src) { -- emitLiteral(dst, src[nextEmit:]) -- } -- e.cur += int32(len(src)) -- e.prev = e.prev[:len(src)] -- copy(e.prev, src) --} -- --// snappyL4 --type snappyL4 struct { -- snappyL3 --} -- --// Encode uses a similar algorithm to level 3, --// but will check up to two candidates if first isn't long enough. --func (e *snappyL4) Encode(dst *tokens, src []byte) { -- const ( -- inputMargin = 8 - 3 -- minNonLiteralBlockSize = 1 + 1 + inputMargin -- matchLenGood = 12 -- ) -- -- // Protect against e.cur wraparound. -- if e.cur > 1<<30 { -- for i := range e.table[:] { -- e.table[i] = tableEntryPrev{} -- } -- e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} -- } -- -- // This check isn't in the Snappy implementation, but there, the caller -- // instead of the callee handles this case. -- if len(src) < minNonLiteralBlockSize { -- // We do not fill the token table. -- // This will be picked up by caller. -- dst.n = uint16(len(src)) -- e.cur += maxStoreBlockSize -- e.prev = e.prev[:0] -- return -- } -- -- // sLimit is when to stop looking for offset/length copies. 
The inputMargin -- // lets us use a fast path for emitLiteral in the main loop, while we are -- // looking for copies. -- sLimit := int32(len(src) - inputMargin) -- -- // nextEmit is where in src the next emitLiteral should start from. -- nextEmit := int32(0) -- s := int32(0) -- cv := load3232(src, s) -- nextHash := hash(cv) -- -- for { -- // Copied from the C++ snappy implementation: -- // -- // Heuristic match skipping: If 32 bytes are scanned with no matches -- // found, start looking only at every other byte. If 32 more bytes are -- // scanned (or skipped), look at every third byte, etc.. When a match -- // is found, immediately go back to looking at every byte. This is a -- // small loss (~5% performance, ~0.1% density) for compressible data -- // due to more bookkeeping, but for non-compressible data (such as -- // JPEG) it's a huge win since the compressor quickly ""realizes"" the -- // data is incompressible and doesn't bother looking for matches -- // everywhere. -- // -- // The ""skip"" variable keeps track of how many bytes there are since -- // the last match; dividing it by 32 (ie. right-shifting by five) gives -- // the number of bytes to move ahead for each iteration. -- skip := int32(32) -- -- nextS := s -- var candidate tableEntry -- var candidateAlt tableEntry -- for { -- s = nextS -- bytesBetweenHashLookups := skip >> 5 -- nextS = s + bytesBetweenHashLookups -- skip += bytesBetweenHashLookups -- if nextS > sLimit { -- goto emitRemainder -- } -- candidates := e.table[nextHash&tableMask] -- now := load3232(src, nextS) -- e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} -- nextHash = hash(now) -- -- // Check both candidates -- candidate = candidates.Cur -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset < maxMatchOffset { -- offset = s - (candidates.Prev.offset - e.cur) -- if cv == candidates.Prev.val && offset < maxMatchOffset { -- candidateAlt = candidates.Prev -- } -- break -- } -- } else { -- // We only check if value mismatches. -- // Offset will always be invalid in other cases. -- candidate = candidates.Prev -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset < maxMatchOffset { -- break -- } -- } -- } -- cv = now -- } -- -- // A 4-byte match has been found. We'll later see if more than 4 bytes -- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -- // them as literal bytes. -- emitLiteral(dst, src[nextEmit:s]) -- -- // Call emitCopy, and then see if another emitCopy could be our next -- // move. Repeat until we find no match for the input immediately after -- // what was consumed by the last emitCopy call. -- // -- // If we exit this loop normally then we need to call emitLiteral next, -- // though we don't yet know how big the literal will be. We handle that -- // by proceeding to the next iteration of the main loop. We also can -- // exit this loop via goto if we get close to exhausting the input. -- for { -- // Invariant: we have a 4-byte match at s, and no need to emit any -- // literal bytes prior to s. -- -- // Extend the 4-byte match as long as possible. -- // -- s += 4 -- t := candidate.offset - e.cur + 4 -- l := e.matchlen(s, t, src) -- // Try alternative candidate if match length < matchLenGood. 
-- if l < matchLenGood-4 && candidateAlt.offset != 0 { -- t2 := candidateAlt.offset - e.cur + 4 -- l2 := e.matchlen(s, t2, src) -- if l2 > l { -- l = l2 -- t = t2 -- } -- } -- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) -- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) -- dst.n++ -- s += l -- nextEmit = s -- if s >= sLimit { -- t += l -- // Index first pair after match end. -- if int(t+4) < len(src) && t > 0 { -- cv := load3232(src, t) -- nextHash = hash(cv) -- e.table[nextHash&tableMask] = tableEntryPrev{ -- Prev: e.table[nextHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + t, val: cv}, -- } -- } -- goto emitRemainder -- } -- -- // We could immediately start working at s now, but to improve -- // compression we first update the hash table at s-3 to s. If -- // another emitCopy is not our next move, also calculate nextHash -- // at s+1. At least on GOARCH=amd64, these three hash calculations -- // are faster as one load64 call (with some shifts) instead of -- // three load32 calls. -- x := load6432(src, s-3) -- prevHash := hash(uint32(x)) -- e.table[prevHash&tableMask] = tableEntryPrev{ -- Prev: e.table[prevHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, -- } -- x >>= 8 -- prevHash = hash(uint32(x)) -- -- e.table[prevHash&tableMask] = tableEntryPrev{ -- Prev: e.table[prevHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, -- } -- x >>= 8 -- prevHash = hash(uint32(x)) -- -- e.table[prevHash&tableMask] = tableEntryPrev{ -- Prev: e.table[prevHash&tableMask].Cur, -- Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, -- } -- x >>= 8 -- currHash := hash(uint32(x)) -- candidates := e.table[currHash&tableMask] -- cv = uint32(x) -- e.table[currHash&tableMask] = tableEntryPrev{ -- Prev: candidates.Cur, -- Cur: tableEntry{offset: s + e.cur, val: cv}, -- } -- -- // Check both candidates -- candidate = candidates.Cur -- candidateAlt = tableEntry{} -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset <= maxMatchOffset { -- offset = s - (candidates.Prev.offset - e.cur) -- if cv == candidates.Prev.val && offset <= maxMatchOffset { -- candidateAlt = candidates.Prev -- } -- continue -- } -- } else { -- // We only check if value mismatches. -- // Offset will always be invalid in other cases. -- candidate = candidates.Prev -- if cv == candidate.val { -- offset := s - (candidate.offset - e.cur) -- if offset <= maxMatchOffset { -- continue -- } -- } -- } -- cv = uint32(x >> 8) -- nextHash = hash(cv) -- s++ -- break -- } -- } -- --emitRemainder: -- if int(nextEmit) < len(src) { -- emitLiteral(dst, src[nextEmit:]) -- } -- e.cur += int32(len(src)) -- e.prev = e.prev[:len(src)] -- copy(e.prev, src) --} -- --func (e *snappyGen) matchlen(s, t int32, src []byte) int32 { -- s1 := int(s) + maxMatchLength - 4 -- if s1 > len(src) { -- s1 = len(src) -- } -- -- // If we are inside the current block -- if t >= 0 { -- b := src[t:] -- a := src[s:s1] -- b = b[:len(a)] -- // Extend the match to be as long as possible. -- for i := range a { -- if a[i] != b[i] { -- return int32(i) -- } -- } -- return int32(len(a)) -- } -- -- // We found a match in the previous block. -- tp := int32(len(e.prev)) + t -- if tp < 0 { -- return 0 -- } -- -- // Extend the match to be as long as possible. 
-- a := src[s:s1] -- b := e.prev[tp:] -- if len(b) > len(a) { -- b = b[:len(a)] -- } -- a = a[:len(b)] -- for i := range b { -- if a[i] != b[i] { -- return int32(i) -- } -- } -- -- // If we reached our limit, we matched everything we are -- // allowed to in the previous block and we return. -- n := int32(len(b)) -- if int(s+n) == s1 { -- return n -- } -- -- // Continue looking for more matches in the current block. -- a = src[s+n : s1] -- b = src[:len(a)] -- for i := range a { -- if a[i] != b[i] { -- return int32(i) + n -- } -- } -- return int32(len(a)) + n --} -- --// Reset the encoding table. --func (e *snappyGen) Reset() { -- e.prev = e.prev[:0] -- e.cur += maxMatchOffset --} -diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go -new file mode 100644 -index 0000000000000..a4705119757d7 ---- /dev/null -+++ b/vendor/github.com/klauspost/compress/flate/stateless.go -@@ -0,0 +1,266 @@ -+package flate -+ -+import ( -+ ""io"" -+ ""math"" -+ ""sync"" -+) -+ -+const ( -+ maxStatelessBlock = math.MaxInt16 -+ -+ slTableBits = 13 -+ slTableSize = 1 << slTableBits -+ slTableShift = 32 - slTableBits -+) -+ -+type statelessWriter struct { -+ dst io.Writer -+ closed bool -+} -+ -+func (s *statelessWriter) Close() error { -+ if s.closed { -+ return nil -+ } -+ s.closed = true -+ // Emit EOF block -+ return StatelessDeflate(s.dst, nil, true) -+} -+ -+func (s *statelessWriter) Write(p []byte) (n int, err error) { -+ err = StatelessDeflate(s.dst, p, false) -+ if err != nil { -+ return 0, err -+ } -+ return len(p), nil -+} -+ -+func (s *statelessWriter) Reset(w io.Writer) { -+ s.dst = w -+ s.closed = false -+} -+ -+// NewStatelessWriter will do compression but without maintaining any state -+// between Write calls. -+// There will be no memory kept between Write calls, -+// but compression and speed will be suboptimal. -+// Because of this, the size of actual Write calls will affect output size. -+func NewStatelessWriter(dst io.Writer) io.WriteCloser { -+ return &statelessWriter{dst: dst} -+} -+ -+// bitWriterPool contains bit writers that can be reused. -+var bitWriterPool = sync.Pool{ -+ New: func() interface{} { -+ return newHuffmanBitWriter(nil) -+ }, -+} -+ -+// StatelessDeflate allows to compress directly to a Writer without retaining state. -+// When returning everything will be flushed. -+func StatelessDeflate(out io.Writer, in []byte, eof bool) error { -+ var dst tokens -+ bw := bitWriterPool.Get().(*huffmanBitWriter) -+ bw.reset(out) -+ defer func() { -+ // don't keep a reference to our output -+ bw.reset(nil) -+ bitWriterPool.Put(bw) -+ }() -+ if eof && len(in) == 0 { -+ // Just write an EOF block. -+ // Could be faster... -+ bw.writeStoredHeader(0, true) -+ bw.flush() -+ return bw.err -+ } -+ -+ for len(in) > 0 { -+ todo := in -+ if len(todo) > maxStatelessBlock { -+ todo = todo[:maxStatelessBlock] -+ } -+ in = in[len(todo):] -+ // Compress -+ statelessEnc(&dst, todo) -+ isEof := eof && len(in) == 0 -+ -+ if dst.n == 0 { -+ bw.writeStoredHeader(len(todo), isEof) -+ if bw.err != nil { -+ return bw.err -+ } -+ bw.writeBytes(todo) -+ } else if int(dst.n) > len(todo)-len(todo)>>4 { -+ // If we removed less than 1/16th, huffman compress the block. -+ bw.writeBlockHuff(isEof, todo, false) -+ } else { -+ bw.writeBlockDynamic(&dst, isEof, todo, false) -+ } -+ if bw.err != nil { -+ return bw.err -+ } -+ dst.Reset() -+ } -+ if !eof { -+ // Align. 
-+ bw.writeStoredHeader(0, false) -+ } -+ bw.flush() -+ return bw.err -+} -+ -+func hashSL(u uint32) uint32 { -+ return (u * 0x1e35a7bd) >> slTableShift -+} -+ -+func load3216(b []byte, i int16) uint32 { -+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read. -+ b = b[i:] -+ b = b[:4] -+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -+} -+ -+func load6416(b []byte, i int16) uint64 { -+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read. -+ b = b[i:] -+ b = b[:8] -+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | -+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -+} -+ -+func statelessEnc(dst *tokens, src []byte) { -+ const ( -+ inputMargin = 12 - 1 -+ minNonLiteralBlockSize = 1 + 1 + inputMargin -+ ) -+ -+ type tableEntry struct { -+ offset int16 -+ } -+ -+ var table [slTableSize]tableEntry -+ -+ // This check isn't in the Snappy implementation, but there, the caller -+ // instead of the callee handles this case. -+ if len(src) < minNonLiteralBlockSize { -+ // We do not fill the token table. -+ // This will be picked up by caller. -+ dst.n = uint16(len(src)) -+ return -+ } -+ -+ s := int16(1) -+ nextEmit := int16(0) -+ // sLimit is when to stop looking for offset/length copies. The inputMargin -+ // lets us use a fast path for emitLiteral in the main loop, while we are -+ // looking for copies. -+ sLimit := int16(len(src) - inputMargin) -+ -+ // nextEmit is where in src the next emitLiteral should start from. -+ cv := load3216(src, s) -+ -+ for { -+ const skipLog = 5 -+ const doEvery = 2 -+ -+ nextS := s -+ var candidate tableEntry -+ for { -+ nextHash := hashSL(cv) -+ candidate = table[nextHash] -+ nextS = s + doEvery + (s-nextEmit)>>skipLog -+ if nextS > sLimit || nextS <= 0 { -+ goto emitRemainder -+ } -+ -+ now := load6416(src, nextS) -+ table[nextHash] = tableEntry{offset: s} -+ nextHash = hashSL(uint32(now)) -+ -+ if cv == load3216(src, candidate.offset) { -+ table[nextHash] = tableEntry{offset: nextS} -+ break -+ } -+ -+ // Do one right away... -+ cv = uint32(now) -+ s = nextS -+ nextS++ -+ candidate = table[nextHash] -+ now >>= 8 -+ table[nextHash] = tableEntry{offset: s} -+ -+ if cv == load3216(src, candidate.offset) { -+ table[nextHash] = tableEntry{offset: nextS} -+ break -+ } -+ cv = uint32(now) -+ s = nextS -+ } -+ -+ // A 4-byte match has been found. We'll later see if more than 4 bytes -+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit -+ // them as literal bytes. -+ for { -+ // Invariant: we have a 4-byte match at s, and no need to emit any -+ // literal bytes prior to s. -+ -+ // Extend the 4-byte match as long as possible. -+ t := candidate.offset -+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4) -+ -+ // Extend backwards -+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] { -+ s-- -+ t-- -+ l++ -+ } -+ if nextEmit < s { -+ emitLiteral(dst, src[nextEmit:s]) -+ } -+ -+ // Save the match found -+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) -+ s += l -+ nextEmit = s -+ if nextS >= s { -+ s = nextS + 1 -+ } -+ if s >= sLimit { -+ goto emitRemainder -+ } -+ -+ // We could immediately start working at s now, but to improve -+ // compression we first update the hash table at s-2 and at s. If -+ // another emitCopy is not our next move, also calculate nextHash -+ // at s+1. 
At least on GOARCH=amd64, these three hash calculations -+ // are faster as one load64 call (with some shifts) instead of -+ // three load32 calls. -+ x := load6416(src, s-2) -+ o := s - 2 -+ prevHash := hashSL(uint32(x)) -+ table[prevHash] = tableEntry{offset: o} -+ x >>= 16 -+ currHash := hashSL(uint32(x)) -+ candidate = table[currHash] -+ table[currHash] = tableEntry{offset: o + 2} -+ -+ if uint32(x) != load3216(src, candidate.offset) { -+ cv = uint32(x >> 8) -+ s++ -+ break -+ } -+ } -+ } -+ -+emitRemainder: -+ if int(nextEmit) < len(src) { -+ // If nothing was added, don't encode literals. -+ if dst.n == 0 { -+ return -+ } -+ emitLiteral(dst, src[nextEmit:]) -+ } -+} -diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go -index 141299b973803..b3df0d8941e12 100644 ---- a/vendor/github.com/klauspost/compress/flate/token.go -+++ b/vendor/github.com/klauspost/compress/flate/token.go -@@ -4,6 +4,14 @@ - - package flate - -+import ( -+ ""bytes"" -+ ""encoding/binary"" -+ ""fmt"" -+ ""io"" -+ ""math"" -+) -+ - const ( - // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused - // 8 bits: xlength = length - MIN_MATCH_LENGTH -@@ -46,6 +54,36 @@ var lengthCodes = [256]uint8{ - 27, 27, 27, 27, 27, 28, - } - -+// lengthCodes1 is length codes, but starting at 1. -+var lengthCodes1 = [256]uint8{ -+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, -+ 10, 10, 11, 11, 12, 12, 13, 13, 13, 13, -+ 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, -+ 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, -+ 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, -+ 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, -+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, -+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, -+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, -+ 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, -+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, -+ 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, -+ 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, -+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, -+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, -+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, -+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, -+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, -+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, -+ 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, -+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -+ 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, -+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -+ 28, 28, 28, 28, 28, 29, -+} -+ - var offsetCodes = [256]uint32{ - 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, -@@ -65,19 +103,236 @@ var offsetCodes = [256]uint32{ - 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, - } - -+// offsetCodes14 are offsetCodes, but with 14 added. 
-+var offsetCodes14 = [256]uint32{ -+ 14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, -+ 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, -+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, -+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, -+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, -+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, -+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, -+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, -+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, -+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, -+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, -+} -+ - type token uint32 - - type tokens struct { -- tokens [maxStoreBlockSize + 1]token -- n uint16 // Must be able to contain maxStoreBlockSize -+ nLits int -+ extraHist [32]uint16 // codes 256->maxnumlit -+ offHist [32]uint16 // offset codes -+ litHist [256]uint16 // codes 0->255 -+ n uint16 // Must be able to contain maxStoreBlockSize -+ tokens [maxStoreBlockSize + 1]token -+} -+ -+func (t *tokens) Reset() { -+ if t.n == 0 { -+ return -+ } -+ t.n = 0 -+ t.nLits = 0 -+ for i := range t.litHist[:] { -+ t.litHist[i] = 0 -+ } -+ for i := range t.extraHist[:] { -+ t.extraHist[i] = 0 -+ } -+ for i := range t.offHist[:] { -+ t.offHist[i] = 0 -+ } -+} -+ -+func (t *tokens) Fill() { -+ if t.n == 0 { -+ return -+ } -+ for i, v := range t.litHist[:] { -+ if v == 0 { -+ t.litHist[i] = 1 -+ t.nLits++ -+ } -+ } -+ for i, v := range t.extraHist[:literalCount-256] { -+ if v == 0 { -+ t.nLits++ -+ t.extraHist[i] = 1 -+ } -+ } -+ for i, v := range t.offHist[:offsetCodeCount] { -+ if v == 0 { -+ t.offHist[i] = 1 -+ } -+ } -+} -+ -+func indexTokens(in []token) tokens { -+ var t tokens -+ t.indexTokens(in) -+ return t -+} -+ -+func (t *tokens) indexTokens(in []token) { -+ t.Reset() -+ for _, tok := range in { -+ if tok < matchType { -+ t.tokens[t.n] = tok -+ t.litHist[tok]++ -+ t.n++ -+ continue -+ } -+ t.AddMatch(uint32(tok.length()), tok.offset()) -+ } - } - --// Convert a literal into a literal token. --func literalToken(literal uint32) token { return token(literalType + literal) } -+// emitLiteral writes a literal chunk and returns the number of bytes written. -+func emitLiteral(dst *tokens, lit []byte) { -+ ol := int(dst.n) -+ for i, v := range lit { -+ dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) -+ dst.litHist[v]++ -+ } -+ dst.n += uint16(len(lit)) -+ dst.nLits += len(lit) -+} - --// Convert a < xlength, xoffset > pair into a match token. 
--func matchToken(xlength uint32, xoffset uint32) token { -- return token(matchType + xlength< 0 { -+ invTotal := 1.0 / float64(t.nLits) -+ for _, v := range t.litHist[:] { -+ if v > 0 { -+ n := float64(v) -+ shannon += math.Ceil(-math.Log2(n*invTotal) * n) -+ } -+ } -+ // Just add 15 for EOB -+ shannon += 15 -+ for _, v := range t.extraHist[1 : literalCount-256] { -+ if v > 0 { -+ n := float64(v) -+ shannon += math.Ceil(-math.Log2(n*invTotal) * n) -+ bits += int(lengthExtraBits[v&31]) * int(v) -+ nMatches += int(v) -+ } -+ } -+ } -+ if nMatches > 0 { -+ invTotal := 1.0 / float64(nMatches) -+ for _, v := range t.offHist[:offsetCodeCount] { -+ if v > 0 { -+ n := float64(v) -+ shannon += math.Ceil(-math.Log2(n*invTotal) * n) -+ bits += int(offsetExtraBits[v&31]) * int(n) -+ } -+ } -+ } -+ -+ return int(shannon) + bits -+} -+ -+// AddMatch adds a match to the tokens. -+// This function is very sensitive to inlining and right on the border. -+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { -+ if debugDecode { -+ if xlength >= maxMatchLength+baseMatchLength { -+ panic(fmt.Errorf(""invalid length: %v"", xlength)) -+ } -+ if xoffset >= maxMatchOffset+baseMatchOffset { -+ panic(fmt.Errorf(""invalid offset: %v"", xoffset)) -+ } -+ } -+ t.nLits++ -+ lengthCode := lengthCodes1[uint8(xlength)] & 31 -+ t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { -+ panic(fmt.Errorf(""invalid offset: %v"", xoffset)) -+ } -+ } -+ oc := offsetCode(xoffset) & 31 -+ for xlength > 0 { -+ xl := xlength -+ if xl > 258 { -+ // We need to have at least baseMatchLength left over for next loop. -+ xl = 258 - baseMatchLength -+ } -+ xlength -= xl -+ xl -= 3 -+ t.nLits++ -+ lengthCode := lengthCodes1[uint8(xl)] & 31 -+ t.tokens[t.n] = token(matchType | uint32(xl)<>7 < uint32(len(offsetCodes)) { -+ return offsetCodes[(off>>7)&255] + 14 -+ } else { -+ return offsetCodes[(off>>14)&255] + 28 -+ } -+ } - if off < uint32(len(offsetCodes)) { -- return offsetCodes[off&255] -- } else if off>>7 < uint32(len(offsetCodes)) { -- return offsetCodes[(off>>7)&255] + 14 -- } else { -- return offsetCodes[(off>>14)&255] + 28 -+ return offsetCodes[uint8(off)] - } -+ return offsetCodes14[uint8(off>>7)] - } -diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go -index 7da7ee7486eb8..ed0cc148f8c77 100644 ---- a/vendor/github.com/klauspost/compress/gzip/gzip.go -+++ b/vendor/github.com/klauspost/compress/gzip/gzip.go -@@ -22,6 +22,13 @@ const ( - DefaultCompression = flate.DefaultCompression - ConstantCompression = flate.ConstantCompression - HuffmanOnly = flate.HuffmanOnly -+ -+ // StatelessCompression will do compression but without maintaining any state -+ // between Write calls. -+ // There will be no memory kept between Write calls, -+ // but compression and speed will be suboptimal. -+ // Because of this, the size of actual Write calls will affect output size. -+ StatelessCompression = -3 - ) - - // A Writer is an io.WriteCloser. -@@ -59,7 +66,7 @@ func NewWriter(w io.Writer) *Writer { - // integer value between BestSpeed and BestCompression inclusive. The error - // returned will be nil if the level is valid. 
- func NewWriterLevel(w io.Writer, level int) (*Writer, error) { -- if level < HuffmanOnly || level > BestCompression { -+ if level < StatelessCompression || level > BestCompression { - return nil, fmt.Errorf(""gzip: invalid compression level: %d"", level) - } - z := new(Writer) -@@ -69,9 +76,12 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, error) { - - func (z *Writer) init(w io.Writer, level int) { - compressor := z.compressor -- if compressor != nil { -- compressor.Reset(w) -+ if level != StatelessCompression { -+ if compressor != nil { -+ compressor.Reset(w) -+ } - } -+ - *z = Writer{ - Header: Header{ - OS: 255, // unknown -@@ -189,12 +199,16 @@ func (z *Writer) Write(p []byte) (int, error) { - return n, z.err - } - } -- if z.compressor == nil { -+ -+ if z.compressor == nil && z.level != StatelessCompression { - z.compressor, _ = flate.NewWriter(z.w, z.level) - } - } - z.size += uint32(len(p)) - z.digest = crc32.Update(z.digest, crc32.IEEETable, p) -+ if z.level == StatelessCompression { -+ return len(p), flate.StatelessDeflate(z.w, p, false) -+ } - n, z.err = z.compressor.Write(p) - return n, z.err - } -@@ -211,7 +225,7 @@ func (z *Writer) Flush() error { - if z.err != nil { - return z.err - } -- if z.closed { -+ if z.closed || z.level == StatelessCompression { - return nil - } - if !z.wroteHeader { -@@ -240,7 +254,11 @@ func (z *Writer) Close() error { - return z.err - } - } -- z.err = z.compressor.Close() -+ if z.level == StatelessCompression { -+ z.err = flate.StatelessDeflate(z.w, nil, true) -+ } else { -+ z.err = z.compressor.Close() -+ } - if z.err != nil { - return z.err - } -diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore -deleted file mode 100644 -index daf913b1b347a..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/.gitignore -+++ /dev/null -@@ -1,24 +0,0 @@ --# Compiled Object files, Static and Dynamic libs (Shared Objects) --*.o --*.a --*.so -- --# Folders --_obj --_test -- --# Architecture specific extensions/prefixes --*.[568vq] --[568vq].out -- --*.cgo1.go --*.cgo2.c --_cgo_defun.c --_cgo_gotypes.go --_cgo_export.* -- --_testmain.go -- --*.exe --*.test --*.prof -diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml -deleted file mode 100644 -index 630192d597b2e..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/.travis.yml -+++ /dev/null -@@ -1,23 +0,0 @@ --language: go -- --sudo: false -- --os: -- - linux -- - osx --go: -- - 1.8.x -- - 1.9.x -- - 1.10.x -- - master -- --script: -- - go vet ./... -- - go test -v ./... -- - go test -race ./... -- - diff <(gofmt -d .) <("""") -- --matrix: -- allow_failures: -- - go: 'master' -- fast_finish: true -diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt -deleted file mode 100644 -index 2ef4714f7165b..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt -+++ /dev/null -@@ -1,35 +0,0 @@ --Developer Certificate of Origin --Version 1.1 -- --Copyright (C) 2015- Klaus Post & Contributors. --Email: klauspost@gmail.com -- --Everyone is permitted to copy and distribute verbatim copies of this --license document, but changing it is not allowed. 
-- -- --Developer's Certificate of Origin 1.1 -- --By making a contribution to this project, I certify that: -- --(a) The contribution was created in whole or in part by me and I -- have the right to submit it under the open source license -- indicated in the file; or -- --(b) The contribution is based upon previous work that, to the best -- of my knowledge, is covered under an appropriate open source -- license and I have the right under that license to submit that -- work with modifications, whether created in whole or in part -- by me, under the same open source license (unless I am -- permitted to submit under a different license), as indicated -- in the file; or -- --(c) The contribution was provided directly to me by some other -- person who certified (a), (b) or (c) and I have not modified -- it. -- --(d) I understand and agree that this project and the contribution -- are public and that a record of the contribution (including all -- personal information I submit with it, including my sign-off) is -- maintained indefinitely and may be redistributed consistent with -- this project or the open source license(s) involved. -diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE -deleted file mode 100644 -index 5cec7ee949b10..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/LICENSE -+++ /dev/null -@@ -1,22 +0,0 @@ --The MIT License (MIT) -- --Copyright (c) 2015 Klaus Post -- --Permission is hereby granted, free of charge, to any person obtaining a copy --of this software and associated documentation files (the ""Software""), to deal --in the Software without restriction, including without limitation the rights --to use, copy, modify, merge, publish, distribute, sublicense, and/or sell --copies of the Software, and to permit persons to whom the Software is --furnished to do so, subject to the following conditions: -- --The above copyright notice and this permission notice shall be included in all --copies or substantial portions of the Software. -- --THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --SOFTWARE. -- -diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md -deleted file mode 100644 -index a7fb41fbecbc3..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/README.md -+++ /dev/null -@@ -1,147 +0,0 @@ --# cpuid --Package cpuid provides information about the CPU running the current program. -- --CPU features are detected on startup, and kept for fast access through the life of the application. --Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. -- --You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
-- --Package home: https://github.com/klauspost/cpuid -- --[![GoDoc][1]][2] [![Build Status][3]][4] -- --[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg --[2]: https://godoc.org/github.com/klauspost/cpuid --[3]: https://travis-ci.org/klauspost/cpuid.svg --[4]: https://travis-ci.org/klauspost/cpuid -- --# features --## CPU Instructions --* **CMOV** (i686 CMOV) --* **NX** (NX (No-Execute) bit) --* **AMD3DNOW** (AMD 3DNOW) --* **AMD3DNOWEXT** (AMD 3DNowExt) --* **MMX** (standard MMX) --* **MMXEXT** (SSE integer functions or AMD MMX ext) --* **SSE** (SSE functions) --* **SSE2** (P4 SSE functions) --* **SSE3** (Prescott SSE3 functions) --* **SSSE3** (Conroe SSSE3 functions) --* **SSE4** (Penryn SSE4.1 functions) --* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) --* **SSE42** (Nehalem SSE4.2 functions) --* **AVX** (AVX functions) --* **AVX2** (AVX2 functions) --* **FMA3** (Intel FMA 3) --* **FMA4** (Bulldozer FMA4 functions) --* **XOP** (Bulldozer XOP functions) --* **F16C** (Half-precision floating-point conversion) --* **BMI1** (Bit Manipulation Instruction Set 1) --* **BMI2** (Bit Manipulation Instruction Set 2) --* **TBM** (AMD Trailing Bit Manipulation) --* **LZCNT** (LZCNT instruction) --* **POPCNT** (POPCNT instruction) --* **AESNI** (Advanced Encryption Standard New Instructions) --* **CLMUL** (Carry-less Multiplication) --* **HTT** (Hyperthreading (enabled)) --* **HLE** (Hardware Lock Elision) --* **RTM** (Restricted Transactional Memory) --* **RDRAND** (RDRAND instruction is available) --* **RDSEED** (RDSEED instruction is available) --* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) --* **SHA** (Intel SHA Extensions) --* **AVX512F** (AVX-512 Foundation) --* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) --* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) --* **AVX512PF** (AVX-512 Prefetch Instructions) --* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) --* **AVX512CD** (AVX-512 Conflict Detection Instructions) --* **AVX512BW** (AVX-512 Byte and Word Instructions) --* **AVX512VL** (AVX-512 Vector Length Extensions) --* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) --* **MPX** (Intel MPX (Memory Protection Extensions)) --* **ERMS** (Enhanced REP MOVSB/STOSB) --* **RDTSCP** (RDTSCP Instruction) --* **CX16** (CMPXCHG16B Instruction) --* **SGX** (Software Guard Extensions, with activation details) -- --## Performance --* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. --* **SSE2SLOW** (SSE2 is supported, but usually not faster) --* **SSE3SLOW** (SSE3 is supported, but usually not faster) --* **ATOM** (Atom processor, some SSSE3 instructions are slower) --* **Cache line** (Probable size of a cache line). --* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. 
-- --## Cpu Vendor/VM --* **Intel** --* **AMD** --* **VIA** --* **Transmeta** --* **NSC** --* **KVM** (Kernel-based Virtual Machine) --* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) --* **VMware** --* **XenHVM** --* **Bhyve** --* **Hygon** -- --# installing -- --```go get github.com/klauspost/cpuid``` -- --# example -- --```Go --package main -- --import ( -- ""fmt"" -- ""github.com/klauspost/cpuid"" --) -- --func main() { -- // Print basic CPU information: -- fmt.Println(""Name:"", cpuid.CPU.BrandName) -- fmt.Println(""PhysicalCores:"", cpuid.CPU.PhysicalCores) -- fmt.Println(""ThreadsPerCore:"", cpuid.CPU.ThreadsPerCore) -- fmt.Println(""LogicalCores:"", cpuid.CPU.LogicalCores) -- fmt.Println(""Family"", cpuid.CPU.Family, ""Model:"", cpuid.CPU.Model) -- fmt.Println(""Features:"", cpuid.CPU.Features) -- fmt.Println(""Cacheline bytes:"", cpuid.CPU.CacheLine) -- fmt.Println(""L1 Data Cache:"", cpuid.CPU.Cache.L1D, ""bytes"") -- fmt.Println(""L1 Instruction Cache:"", cpuid.CPU.Cache.L1D, ""bytes"") -- fmt.Println(""L2 Cache:"", cpuid.CPU.Cache.L2, ""bytes"") -- fmt.Println(""L3 Cache:"", cpuid.CPU.Cache.L3, ""bytes"") -- -- // Test if we have a specific feature: -- if cpuid.CPU.SSE() { -- fmt.Println(""We have Streaming SIMD Extensions"") -- } --} --``` -- --Sample output: --``` -->go run main.go --Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz --PhysicalCores: 2 --ThreadsPerCore: 2 --LogicalCores: 4 --Family 6 Model: 42 --Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL --Cacheline bytes: 64 --We have Streaming SIMD Extensions --``` -- --# private package -- --In the ""private"" folder you can find an autogenerated version of the library you can include in your own packages. -- --For this purpose all exports are removed, and functions and constants are lowercased. -- --This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. -- --# license -- --This code is published under an MIT license. See LICENSE file for more information. -diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go -deleted file mode 100644 -index db95913212311..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/cpuid.go -+++ /dev/null -@@ -1,1049 +0,0 @@ --// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. -- --// Package cpuid provides information about the CPU running the current program. --// --// CPU features are detected on startup, and kept for fast access through the life of the application. --// Currently x86 / x64 (AMD64) is supported. --// --// You can access the CPU information by accessing the shared CPU variable of the cpuid library. --// --// Package home: https://github.com/klauspost/cpuid --package cpuid -- --import ""strings"" -- --// Vendor is a representation of a CPU vendor. 
--type Vendor int -- --const ( -- Other Vendor = iota -- Intel -- AMD -- VIA -- Transmeta -- NSC -- KVM // Kernel-based Virtual Machine -- MSVM // Microsoft Hyper-V or Windows Virtual PC -- VMware -- XenHVM -- Bhyve -- Hygon --) -- --const ( -- CMOV = 1 << iota // i686 CMOV -- NX // NX (No-Execute) bit -- AMD3DNOW // AMD 3DNOW -- AMD3DNOWEXT // AMD 3DNowExt -- MMX // standard MMX -- MMXEXT // SSE integer functions or AMD MMX ext -- SSE // SSE functions -- SSE2 // P4 SSE functions -- SSE3 // Prescott SSE3 functions -- SSSE3 // Conroe SSSE3 functions -- SSE4 // Penryn SSE4.1 functions -- SSE4A // AMD Barcelona microarchitecture SSE4a instructions -- SSE42 // Nehalem SSE4.2 functions -- AVX // AVX functions -- AVX2 // AVX2 functions -- FMA3 // Intel FMA 3 -- FMA4 // Bulldozer FMA4 functions -- XOP // Bulldozer XOP functions -- F16C // Half-precision floating-point conversion -- BMI1 // Bit Manipulation Instruction Set 1 -- BMI2 // Bit Manipulation Instruction Set 2 -- TBM // AMD Trailing Bit Manipulation -- LZCNT // LZCNT instruction -- POPCNT // POPCNT instruction -- AESNI // Advanced Encryption Standard New Instructions -- CLMUL // Carry-less Multiplication -- HTT // Hyperthreading (enabled) -- HLE // Hardware Lock Elision -- RTM // Restricted Transactional Memory -- RDRAND // RDRAND instruction is available -- RDSEED // RDSEED instruction is available -- ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) -- SHA // Intel SHA Extensions -- AVX512F // AVX-512 Foundation -- AVX512DQ // AVX-512 Doubleword and Quadword Instructions -- AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions -- AVX512PF // AVX-512 Prefetch Instructions -- AVX512ER // AVX-512 Exponential and Reciprocal Instructions -- AVX512CD // AVX-512 Conflict Detection Instructions -- AVX512BW // AVX-512 Byte and Word Instructions -- AVX512VL // AVX-512 Vector Length Extensions -- AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions -- MPX // Intel MPX (Memory Protection Extensions) -- ERMS // Enhanced REP MOVSB/STOSB -- RDTSCP // RDTSCP Instruction -- CX16 // CMPXCHG16B Instruction -- SGX // Software Guard Extensions -- IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) -- STIBP // Single Thread Indirect Branch Predictors -- -- // Performance indicators -- SSE2SLOW // SSE2 is supported, but usually not faster -- SSE3SLOW // SSE3 is supported, but usually not faster -- ATOM // Atom processor, some SSSE3 instructions are slower --) -- --var flagNames = map[Flags]string{ -- CMOV: ""CMOV"", // i686 CMOV -- NX: ""NX"", // NX (No-Execute) bit -- AMD3DNOW: ""AMD3DNOW"", // AMD 3DNOW -- AMD3DNOWEXT: ""AMD3DNOWEXT"", // AMD 3DNowExt -- MMX: ""MMX"", // Standard MMX -- MMXEXT: ""MMXEXT"", // SSE integer functions or AMD MMX ext -- SSE: ""SSE"", // SSE functions -- SSE2: ""SSE2"", // P4 SSE2 functions -- SSE3: ""SSE3"", // Prescott SSE3 functions -- SSSE3: ""SSSE3"", // Conroe SSSE3 functions -- SSE4: ""SSE4.1"", // Penryn SSE4.1 functions -- SSE4A: ""SSE4A"", // AMD Barcelona microarchitecture SSE4a instructions -- SSE42: ""SSE4.2"", // Nehalem SSE4.2 functions -- AVX: ""AVX"", // AVX functions -- AVX2: ""AVX2"", // AVX functions -- FMA3: ""FMA3"", // Intel FMA 3 -- FMA4: ""FMA4"", // Bulldozer FMA4 functions -- XOP: ""XOP"", // Bulldozer XOP functions -- F16C: ""F16C"", // Half-precision floating-point conversion -- BMI1: ""BMI1"", // Bit Manipulation Instruction Set 1 -- BMI2: ""BMI2"", // Bit Manipulation Instruction Set 2 -- TBM: ""TBM"", // AMD Trailing 
Bit Manipulation -- LZCNT: ""LZCNT"", // LZCNT instruction -- POPCNT: ""POPCNT"", // POPCNT instruction -- AESNI: ""AESNI"", // Advanced Encryption Standard New Instructions -- CLMUL: ""CLMUL"", // Carry-less Multiplication -- HTT: ""HTT"", // Hyperthreading (enabled) -- HLE: ""HLE"", // Hardware Lock Elision -- RTM: ""RTM"", // Restricted Transactional Memory -- RDRAND: ""RDRAND"", // RDRAND instruction is available -- RDSEED: ""RDSEED"", // RDSEED instruction is available -- ADX: ""ADX"", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) -- SHA: ""SHA"", // Intel SHA Extensions -- AVX512F: ""AVX512F"", // AVX-512 Foundation -- AVX512DQ: ""AVX512DQ"", // AVX-512 Doubleword and Quadword Instructions -- AVX512IFMA: ""AVX512IFMA"", // AVX-512 Integer Fused Multiply-Add Instructions -- AVX512PF: ""AVX512PF"", // AVX-512 Prefetch Instructions -- AVX512ER: ""AVX512ER"", // AVX-512 Exponential and Reciprocal Instructions -- AVX512CD: ""AVX512CD"", // AVX-512 Conflict Detection Instructions -- AVX512BW: ""AVX512BW"", // AVX-512 Byte and Word Instructions -- AVX512VL: ""AVX512VL"", // AVX-512 Vector Length Extensions -- AVX512VBMI: ""AVX512VBMI"", // AVX-512 Vector Bit Manipulation Instructions -- MPX: ""MPX"", // Intel MPX (Memory Protection Extensions) -- ERMS: ""ERMS"", // Enhanced REP MOVSB/STOSB -- RDTSCP: ""RDTSCP"", // RDTSCP Instruction -- CX16: ""CX16"", // CMPXCHG16B Instruction -- SGX: ""SGX"", // Software Guard Extensions -- IBPB: ""IBPB"", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier -- STIBP: ""STIBP"", // Single Thread Indirect Branch Predictors -- -- // Performance indicators -- SSE2SLOW: ""SSE2SLOW"", // SSE2 supported, but usually not faster -- SSE3SLOW: ""SSE3SLOW"", // SSE3 supported, but usually not faster -- ATOM: ""ATOM"", // Atom processor, some SSSE3 instructions are slower -- --} -- --// CPUInfo contains information about the detected system CPU. --type CPUInfo struct { -- BrandName string // Brand name reported by the CPU -- VendorID Vendor // Comparable CPU vendor ID -- Features Flags // Features of the CPU -- PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. -- ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. -- LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. -- Family int // CPU family number -- Model int // CPU model number -- CacheLine int // Cache line size in bytes. Will be 0 if undetectable. -- Cache struct { -- L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected -- L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected -- L2 int // L2 Cache (per core or shared). Will be -1 if undetected -- L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected -- } -- SGX SGXSupport -- maxFunc uint32 -- maxExFunc uint32 --} -- --var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) --var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) --var xgetbv func(index uint32) (eax, edx uint32) --var rdtscpAsm func() (eax, ebx, ecx, edx uint32) -- --// CPU contains information about the CPU as detected on startup, --// or when Detect last was called. --// --// Use this as the primary entry point to you data, --// this way queries are --var CPU CPUInfo -- --func init() { -- initCPU() -- Detect() --} -- --// Detect will re-detect current CPU info. 
--// This will replace the content of the exported CPU variable. --// --// Unless you expect the CPU to change while you are running your program --// you should not need to call this function. --// If you call this, you must ensure that no other goroutine is accessing the --// exported CPU variable. --func Detect() { -- CPU.maxFunc = maxFunctionID() -- CPU.maxExFunc = maxExtendedFunction() -- CPU.BrandName = brandName() -- CPU.CacheLine = cacheLine() -- CPU.Family, CPU.Model = familyModel() -- CPU.Features = support() -- CPU.SGX = hasSGX(CPU.Features&SGX != 0) -- CPU.ThreadsPerCore = threadsPerCore() -- CPU.LogicalCores = logicalCores() -- CPU.PhysicalCores = physicalCores() -- CPU.VendorID = vendorID() -- CPU.cacheSize() --} -- --// Generated here: http://play.golang.org/p/BxFH2Gdc0G -- --// Cmov indicates support of CMOV instructions --func (c CPUInfo) Cmov() bool { -- return c.Features&CMOV != 0 --} -- --// Amd3dnow indicates support of AMD 3DNOW! instructions --func (c CPUInfo) Amd3dnow() bool { -- return c.Features&AMD3DNOW != 0 --} -- --// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions --func (c CPUInfo) Amd3dnowExt() bool { -- return c.Features&AMD3DNOWEXT != 0 --} -- --// MMX indicates support of MMX instructions --func (c CPUInfo) MMX() bool { -- return c.Features&MMX != 0 --} -- --// MMXExt indicates support of MMXEXT instructions --// (SSE integer functions or AMD MMX ext) --func (c CPUInfo) MMXExt() bool { -- return c.Features&MMXEXT != 0 --} -- --// SSE indicates support of SSE instructions --func (c CPUInfo) SSE() bool { -- return c.Features&SSE != 0 --} -- --// SSE2 indicates support of SSE 2 instructions --func (c CPUInfo) SSE2() bool { -- return c.Features&SSE2 != 0 --} -- --// SSE3 indicates support of SSE 3 instructions --func (c CPUInfo) SSE3() bool { -- return c.Features&SSE3 != 0 --} -- --// SSSE3 indicates support of SSSE 3 instructions --func (c CPUInfo) SSSE3() bool { -- return c.Features&SSSE3 != 0 --} -- --// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions --func (c CPUInfo) SSE4() bool { -- return c.Features&SSE4 != 0 --} -- --// SSE42 indicates support of SSE4.2 instructions --func (c CPUInfo) SSE42() bool { -- return c.Features&SSE42 != 0 --} -- --// AVX indicates support of AVX instructions --// and operating system support of AVX instructions --func (c CPUInfo) AVX() bool { -- return c.Features&AVX != 0 --} -- --// AVX2 indicates support of AVX2 instructions --func (c CPUInfo) AVX2() bool { -- return c.Features&AVX2 != 0 --} -- --// FMA3 indicates support of FMA3 instructions --func (c CPUInfo) FMA3() bool { -- return c.Features&FMA3 != 0 --} -- --// FMA4 indicates support of FMA4 instructions --func (c CPUInfo) FMA4() bool { -- return c.Features&FMA4 != 0 --} -- --// XOP indicates support of XOP instructions --func (c CPUInfo) XOP() bool { -- return c.Features&XOP != 0 --} -- --// F16C indicates support of F16C instructions --func (c CPUInfo) F16C() bool { -- return c.Features&F16C != 0 --} -- --// BMI1 indicates support of BMI1 instructions --func (c CPUInfo) BMI1() bool { -- return c.Features&BMI1 != 0 --} -- --// BMI2 indicates support of BMI2 instructions --func (c CPUInfo) BMI2() bool { -- return c.Features&BMI2 != 0 --} -- --// TBM indicates support of TBM instructions --// (AMD Trailing Bit Manipulation) --func (c CPUInfo) TBM() bool { -- return c.Features&TBM != 0 --} -- --// Lzcnt indicates support of LZCNT instruction --func (c CPUInfo) Lzcnt() bool { -- return c.Features&LZCNT != 0 --} -- --// Popcnt 
indicates support of POPCNT instruction --func (c CPUInfo) Popcnt() bool { -- return c.Features&POPCNT != 0 --} -- --// HTT indicates the processor has Hyperthreading enabled --func (c CPUInfo) HTT() bool { -- return c.Features&HTT != 0 --} -- --// SSE2Slow indicates that SSE2 may be slow on this processor --func (c CPUInfo) SSE2Slow() bool { -- return c.Features&SSE2SLOW != 0 --} -- --// SSE3Slow indicates that SSE3 may be slow on this processor --func (c CPUInfo) SSE3Slow() bool { -- return c.Features&SSE3SLOW != 0 --} -- --// AesNi indicates support of AES-NI instructions --// (Advanced Encryption Standard New Instructions) --func (c CPUInfo) AesNi() bool { -- return c.Features&AESNI != 0 --} -- --// Clmul indicates support of CLMUL instructions --// (Carry-less Multiplication) --func (c CPUInfo) Clmul() bool { -- return c.Features&CLMUL != 0 --} -- --// NX indicates support of NX (No-Execute) bit --func (c CPUInfo) NX() bool { -- return c.Features&NX != 0 --} -- --// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions --func (c CPUInfo) SSE4A() bool { -- return c.Features&SSE4A != 0 --} -- --// HLE indicates support of Hardware Lock Elision --func (c CPUInfo) HLE() bool { -- return c.Features&HLE != 0 --} -- --// RTM indicates support of Restricted Transactional Memory --func (c CPUInfo) RTM() bool { -- return c.Features&RTM != 0 --} -- --// Rdrand indicates support of RDRAND instruction is available --func (c CPUInfo) Rdrand() bool { -- return c.Features&RDRAND != 0 --} -- --// Rdseed indicates support of RDSEED instruction is available --func (c CPUInfo) Rdseed() bool { -- return c.Features&RDSEED != 0 --} -- --// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) --func (c CPUInfo) ADX() bool { -- return c.Features&ADX != 0 --} -- --// SHA indicates support of Intel SHA Extensions --func (c CPUInfo) SHA() bool { -- return c.Features&SHA != 0 --} -- --// AVX512F indicates support of AVX-512 Foundation --func (c CPUInfo) AVX512F() bool { -- return c.Features&AVX512F != 0 --} -- --// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions --func (c CPUInfo) AVX512DQ() bool { -- return c.Features&AVX512DQ != 0 --} -- --// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions --func (c CPUInfo) AVX512IFMA() bool { -- return c.Features&AVX512IFMA != 0 --} -- --// AVX512PF indicates support of AVX-512 Prefetch Instructions --func (c CPUInfo) AVX512PF() bool { -- return c.Features&AVX512PF != 0 --} -- --// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions --func (c CPUInfo) AVX512ER() bool { -- return c.Features&AVX512ER != 0 --} -- --// AVX512CD indicates support of AVX-512 Conflict Detection Instructions --func (c CPUInfo) AVX512CD() bool { -- return c.Features&AVX512CD != 0 --} -- --// AVX512BW indicates support of AVX-512 Byte and Word Instructions --func (c CPUInfo) AVX512BW() bool { -- return c.Features&AVX512BW != 0 --} -- --// AVX512VL indicates support of AVX-512 Vector Length Extensions --func (c CPUInfo) AVX512VL() bool { -- return c.Features&AVX512VL != 0 --} -- --// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions --func (c CPUInfo) AVX512VBMI() bool { -- return c.Features&AVX512VBMI != 0 --} -- --// MPX indicates support of Intel MPX (Memory Protection Extensions) --func (c CPUInfo) MPX() bool { -- return c.Features&MPX != 0 --} -- --// ERMS indicates support of Enhanced REP MOVSB/STOSB --func (c CPUInfo) ERMS() bool 
{ -- return c.Features&ERMS != 0 --} -- --// RDTSCP Instruction is available. --func (c CPUInfo) RDTSCP() bool { -- return c.Features&RDTSCP != 0 --} -- --// CX16 indicates if CMPXCHG16B instruction is available. --func (c CPUInfo) CX16() bool { -- return c.Features&CX16 != 0 --} -- --// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. --// So TSX simply checks that. --func (c CPUInfo) TSX() bool { -- return c.Features&(HLE|RTM) == HLE|RTM --} -- --// Atom indicates an Atom processor --func (c CPUInfo) Atom() bool { -- return c.Features&ATOM != 0 --} -- --// Intel returns true if vendor is recognized as Intel --func (c CPUInfo) Intel() bool { -- return c.VendorID == Intel --} -- --// AMD returns true if vendor is recognized as AMD --func (c CPUInfo) AMD() bool { -- return c.VendorID == AMD --} -- --// Hygon returns true if vendor is recognized as Hygon --func (c CPUInfo) Hygon() bool { -- return c.VendorID == Hygon --} -- --// Transmeta returns true if vendor is recognized as Transmeta --func (c CPUInfo) Transmeta() bool { -- return c.VendorID == Transmeta --} -- --// NSC returns true if vendor is recognized as National Semiconductor --func (c CPUInfo) NSC() bool { -- return c.VendorID == NSC --} -- --// VIA returns true if vendor is recognized as VIA --func (c CPUInfo) VIA() bool { -- return c.VendorID == VIA --} -- --// RTCounter returns the 64-bit time-stamp counter --// Uses the RDTSCP instruction. The value 0 is returned --// if the CPU does not support the instruction. --func (c CPUInfo) RTCounter() uint64 { -- if !c.RDTSCP() { -- return 0 -- } -- a, _, _, d := rdtscpAsm() -- return uint64(a) | (uint64(d) << 32) --} -- --// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. --// This variable is OS dependent, but on Linux contains information --// about the current cpu/core the code is running on. --// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. --func (c CPUInfo) Ia32TscAux() uint32 { -- if !c.RDTSCP() { -- return 0 -- } -- _, _, ecx, _ := rdtscpAsm() -- return ecx --} -- --// LogicalCPU will return the Logical CPU the code is currently executing on. --// This is likely to change when the OS re-schedules the running thread --// to another CPU. --// If the current core cannot be detected, -1 will be returned. --func (c CPUInfo) LogicalCPU() int { -- if c.maxFunc < 1 { -- return -1 -- } -- _, ebx, _, _ := cpuid(1) -- return int(ebx >> 24) --} -- --// VM Will return true if the cpu id indicates we are in --// a virtual machine. This is only a hint, and will very likely --// have many false negatives. --func (c CPUInfo) VM() bool { -- switch c.VendorID { -- case MSVM, KVM, VMware, XenHVM, Bhyve: -- return true -- } -- return false --} -- --// Flags contains detected cpu features and caracteristics --type Flags uint64 -- --// String returns a string representation of the detected --// CPU features. --func (f Flags) String() string { -- return strings.Join(f.Strings(), "","") --} -- --// Strings returns and array of the detected features. 
--func (f Flags) Strings() []string { -- s := support() -- r := make([]string, 0, 20) -- for i := uint(0); i < 64; i++ { -- key := Flags(1 << i) -- val := flagNames[key] -- if s&key != 0 { -- r = append(r, val) -- } -- } -- return r --} -- --func maxExtendedFunction() uint32 { -- eax, _, _, _ := cpuid(0x80000000) -- return eax --} -- --func maxFunctionID() uint32 { -- a, _, _, _ := cpuid(0) -- return a --} -- --func brandName() string { -- if maxExtendedFunction() >= 0x80000004 { -- v := make([]uint32, 0, 48) -- for i := uint32(0); i < 3; i++ { -- a, b, c, d := cpuid(0x80000002 + i) -- v = append(v, a, b, c, d) -- } -- return strings.Trim(string(valAsString(v...)), "" "") -- } -- return ""unknown"" --} -- --func threadsPerCore() int { -- mfi := maxFunctionID() -- if mfi < 0x4 || vendorID() != Intel { -- return 1 -- } -- -- if mfi < 0xb { -- _, b, _, d := cpuid(1) -- if (d & (1 << 28)) != 0 { -- // v will contain logical core count -- v := (b >> 16) & 255 -- if v > 1 { -- a4, _, _, _ := cpuid(4) -- // physical cores -- v2 := (a4 >> 26) + 1 -- if v2 > 0 { -- return int(v) / int(v2) -- } -- } -- } -- return 1 -- } -- _, b, _, _ := cpuidex(0xb, 0) -- if b&0xffff == 0 { -- return 1 -- } -- return int(b & 0xffff) --} -- --func logicalCores() int { -- mfi := maxFunctionID() -- switch vendorID() { -- case Intel: -- // Use this on old Intel processors -- if mfi < 0xb { -- if mfi < 1 { -- return 0 -- } -- // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) -- // that can be assigned to logical processors in a physical package. -- // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. -- _, ebx, _, _ := cpuid(1) -- logical := (ebx >> 16) & 0xff -- return int(logical) -- } -- _, b, _, _ := cpuidex(0xb, 1) -- return int(b & 0xffff) -- case AMD, Hygon: -- _, b, _, _ := cpuid(1) -- return int((b >> 16) & 0xff) -- default: -- return 0 -- } --} -- --func familyModel() (int, int) { -- if maxFunctionID() < 0x1 { -- return 0, 0 -- } -- eax, _, _, _ := cpuid(1) -- family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) -- model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) -- return int(family), int(model) --} -- --func physicalCores() int { -- switch vendorID() { -- case Intel: -- return logicalCores() / threadsPerCore() -- case AMD, Hygon: -- if maxExtendedFunction() >= 0x80000008 { -- _, _, c, _ := cpuid(0x80000008) -- return int(c&0xff) + 1 -- } -- } -- return 0 --} -- --// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID --var vendorMapping = map[string]Vendor{ -- ""AMDisbetter!"": AMD, -- ""AuthenticAMD"": AMD, -- ""CentaurHauls"": VIA, -- ""GenuineIntel"": Intel, -- ""TransmetaCPU"": Transmeta, -- ""GenuineTMx86"": Transmeta, -- ""Geode by NSC"": NSC, -- ""VIA VIA VIA "": VIA, -- ""KVMKVMKVMKVM"": KVM, -- ""Microsoft Hv"": MSVM, -- ""VMwareVMware"": VMware, -- ""XenVMMXenVMM"": XenHVM, -- ""bhyve bhyve "": Bhyve, -- ""HygonGenuine"": Hygon, --} -- --func vendorID() Vendor { -- _, b, c, d := cpuid(0) -- v := valAsString(b, d, c) -- vend, ok := vendorMapping[string(v)] -- if !ok { -- return Other -- } -- return vend --} -- --func cacheLine() int { -- if maxFunctionID() < 0x1 { -- return 0 -- } -- -- _, ebx, _, _ := cpuid(1) -- cache := (ebx & 0xff00) >> 5 // cflush size -- if cache == 0 && maxExtendedFunction() >= 0x80000006 { -- _, _, ecx, _ := cpuid(0x80000006) -- cache = ecx & 0xff // cacheline size -- } -- // TODO: Read from Cache and TLB Information -- return int(cache) --} -- 
--func (c *CPUInfo) cacheSize() { -- c.Cache.L1D = -1 -- c.Cache.L1I = -1 -- c.Cache.L2 = -1 -- c.Cache.L3 = -1 -- vendor := vendorID() -- switch vendor { -- case Intel: -- if maxFunctionID() < 4 { -- return -- } -- for i := uint32(0); ; i++ { -- eax, ebx, ecx, _ := cpuidex(4, i) -- cacheType := eax & 15 -- if cacheType == 0 { -- break -- } -- cacheLevel := (eax >> 5) & 7 -- coherency := int(ebx&0xfff) + 1 -- partitions := int((ebx>>12)&0x3ff) + 1 -- associativity := int((ebx>>22)&0x3ff) + 1 -- sets := int(ecx) + 1 -- size := associativity * partitions * coherency * sets -- switch cacheLevel { -- case 1: -- if cacheType == 1 { -- // 1 = Data Cache -- c.Cache.L1D = size -- } else if cacheType == 2 { -- // 2 = Instruction Cache -- c.Cache.L1I = size -- } else { -- if c.Cache.L1D < 0 { -- c.Cache.L1I = size -- } -- if c.Cache.L1I < 0 { -- c.Cache.L1I = size -- } -- } -- case 2: -- c.Cache.L2 = size -- case 3: -- c.Cache.L3 = size -- } -- } -- case AMD, Hygon: -- // Untested. -- if maxExtendedFunction() < 0x80000005 { -- return -- } -- _, _, ecx, edx := cpuid(0x80000005) -- c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) -- c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) -- -- if maxExtendedFunction() < 0x80000006 { -- return -- } -- _, _, ecx, _ = cpuid(0x80000006) -- c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) -- } -- -- return --} -- --type SGXSupport struct { -- Available bool -- SGX1Supported bool -- SGX2Supported bool -- MaxEnclaveSizeNot64 int64 -- MaxEnclaveSize64 int64 --} -- --func hasSGX(available bool) (rval SGXSupport) { -- rval.Available = available -- -- if !available { -- return -- } -- -- a, _, _, d := cpuidex(0x12, 0) -- rval.SGX1Supported = a&0x01 != 0 -- rval.SGX2Supported = a&0x02 != 0 -- rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 -- rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 -- -- return --} -- --func support() Flags { -- mfi := maxFunctionID() -- vend := vendorID() -- if mfi < 0x1 { -- return 0 -- } -- rval := uint64(0) -- _, _, c, d := cpuid(1) -- if (d & (1 << 15)) != 0 { -- rval |= CMOV -- } -- if (d & (1 << 23)) != 0 { -- rval |= MMX -- } -- if (d & (1 << 25)) != 0 { -- rval |= MMXEXT -- } -- if (d & (1 << 25)) != 0 { -- rval |= SSE -- } -- if (d & (1 << 26)) != 0 { -- rval |= SSE2 -- } -- if (c & 1) != 0 { -- rval |= SSE3 -- } -- if (c & 0x00000200) != 0 { -- rval |= SSSE3 -- } -- if (c & 0x00080000) != 0 { -- rval |= SSE4 -- } -- if (c & 0x00100000) != 0 { -- rval |= SSE42 -- } -- if (c & (1 << 25)) != 0 { -- rval |= AESNI -- } -- if (c & (1 << 1)) != 0 { -- rval |= CLMUL -- } -- if c&(1<<23) != 0 { -- rval |= POPCNT -- } -- if c&(1<<30) != 0 { -- rval |= RDRAND -- } -- if c&(1<<29) != 0 { -- rval |= F16C -- } -- if c&(1<<13) != 0 { -- rval |= CX16 -- } -- if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { -- if threadsPerCore() > 1 { -- rval |= HTT -- } -- } -- -- // Check XGETBV, OXSAVE and AVX bits -- if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { -- // Check for OS support -- eax, _ := xgetbv(0) -- if (eax & 0x6) == 0x6 { -- rval |= AVX -- if (c & 0x00001000) != 0 { -- rval |= FMA3 -- } -- } -- } -- -- // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
-- if mfi >= 7 { -- _, ebx, ecx, edx := cpuidex(7, 0) -- if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { -- rval |= AVX2 -- } -- if (ebx & 0x00000008) != 0 { -- rval |= BMI1 -- if (ebx & 0x00000100) != 0 { -- rval |= BMI2 -- } -- } -- if ebx&(1<<2) != 0 { -- rval |= SGX -- } -- if ebx&(1<<4) != 0 { -- rval |= HLE -- } -- if ebx&(1<<9) != 0 { -- rval |= ERMS -- } -- if ebx&(1<<11) != 0 { -- rval |= RTM -- } -- if ebx&(1<<14) != 0 { -- rval |= MPX -- } -- if ebx&(1<<18) != 0 { -- rval |= RDSEED -- } -- if ebx&(1<<19) != 0 { -- rval |= ADX -- } -- if ebx&(1<<29) != 0 { -- rval |= SHA -- } -- if edx&(1<<26) != 0 { -- rval |= IBPB -- } -- if edx&(1<<27) != 0 { -- rval |= STIBP -- } -- -- // Only detect AVX-512 features if XGETBV is supported -- if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { -- // Check for OS support -- eax, _ := xgetbv(0) -- -- // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and -- // ZMM16-ZMM31 state are enabled by OS) -- /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). -- if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { -- if ebx&(1<<16) != 0 { -- rval |= AVX512F -- } -- if ebx&(1<<17) != 0 { -- rval |= AVX512DQ -- } -- if ebx&(1<<21) != 0 { -- rval |= AVX512IFMA -- } -- if ebx&(1<<26) != 0 { -- rval |= AVX512PF -- } -- if ebx&(1<<27) != 0 { -- rval |= AVX512ER -- } -- if ebx&(1<<28) != 0 { -- rval |= AVX512CD -- } -- if ebx&(1<<30) != 0 { -- rval |= AVX512BW -- } -- if ebx&(1<<31) != 0 { -- rval |= AVX512VL -- } -- // ecx -- if ecx&(1<<1) != 0 { -- rval |= AVX512VBMI -- } -- } -- } -- } -- -- if maxExtendedFunction() >= 0x80000001 { -- _, _, c, d := cpuid(0x80000001) -- if (c & (1 << 5)) != 0 { -- rval |= LZCNT -- rval |= POPCNT -- } -- if (d & (1 << 31)) != 0 { -- rval |= AMD3DNOW -- } -- if (d & (1 << 30)) != 0 { -- rval |= AMD3DNOWEXT -- } -- if (d & (1 << 23)) != 0 { -- rval |= MMX -- } -- if (d & (1 << 22)) != 0 { -- rval |= MMXEXT -- } -- if (c & (1 << 6)) != 0 { -- rval |= SSE4A -- } -- if d&(1<<20) != 0 { -- rval |= NX -- } -- if d&(1<<27) != 0 { -- rval |= RDTSCP -- } -- -- /* Allow for selectively disabling SSE2 functions on AMD processors -- with SSE2 support but not SSE4a. This includes Athlon64, some -- Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster -- than SSE2 often enough to utilize this special-case flag. -- AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case -- so that SSE2 is used unless explicitly disabled by checking -- AV_CPU_FLAG_SSE2SLOW. */ -- if vendorID() != Intel && -- rval&SSE2 != 0 && (c&0x00000040) == 0 { -- rval |= SSE2SLOW -- } -- -- /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be -- * used unless the OS has AVX support. */ -- if (rval & AVX) != 0 { -- if (c & 0x00000800) != 0 { -- rval |= XOP -- } -- if (c & 0x00010000) != 0 { -- rval |= FMA4 -- } -- } -- -- if vendorID() == Intel { -- family, model := familyModel() -- if family == 6 && (model == 9 || model == 13 || model == 14) { -- /* 6/9 (pentium-m ""banias""), 6/13 (pentium-m ""dothan""), and -- * 6/14 (core1 ""yonah"") theoretically support sse2, but it's -- * usually slower than mmx. */ -- if (rval & SSE2) != 0 { -- rval |= SSE2SLOW -- } -- if (rval & SSE3) != 0 { -- rval |= SSE3SLOW -- } -- } -- /* The Atom processor has SSSE3 support, which is useful in many cases, -- * but sometimes the SSSE3 version is slower than the SSE2 equivalent -- * on the Atom, but is generally faster on other processors supporting -- * SSSE3. 
This flag allows for selectively disabling certain SSSE3 -- * functions on the Atom. */ -- if family == 6 && model == 28 { -- rval |= ATOM -- } -- } -- } -- return Flags(rval) --} -- --func valAsString(values ...uint32) []byte { -- r := make([]byte, 4*len(values)) -- for i, v := range values { -- dst := r[i*4:] -- dst[0] = byte(v & 0xff) -- dst[1] = byte((v >> 8) & 0xff) -- dst[2] = byte((v >> 16) & 0xff) -- dst[3] = byte((v >> 24) & 0xff) -- switch { -- case dst[0] == 0: -- return r[:i*4] -- case dst[1] == 0: -- return r[:i*4+1] -- case dst[2] == 0: -- return r[:i*4+2] -- case dst[3] == 0: -- return r[:i*4+3] -- } -- } -- return r --} -diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s -deleted file mode 100644 -index 4d731711e48f2..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/cpuid_386.s -+++ /dev/null -@@ -1,42 +0,0 @@ --// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. -- --// +build 386,!gccgo -- --// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) --TEXT ·asmCpuid(SB), 7, $0 -- XORL CX, CX -- MOVL op+0(FP), AX -- CPUID -- MOVL AX, eax+4(FP) -- MOVL BX, ebx+8(FP) -- MOVL CX, ecx+12(FP) -- MOVL DX, edx+16(FP) -- RET -- --// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) --TEXT ·asmCpuidex(SB), 7, $0 -- MOVL op+0(FP), AX -- MOVL op2+4(FP), CX -- CPUID -- MOVL AX, eax+8(FP) -- MOVL BX, ebx+12(FP) -- MOVL CX, ecx+16(FP) -- MOVL DX, edx+20(FP) -- RET -- --// func xgetbv(index uint32) (eax, edx uint32) --TEXT ·asmXgetbv(SB), 7, $0 -- MOVL index+0(FP), CX -- BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV -- MOVL AX, eax+4(FP) -- MOVL DX, edx+8(FP) -- RET -- --// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) --TEXT ·asmRdtscpAsm(SB), 7, $0 -- BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP -- MOVL AX, eax+0(FP) -- MOVL BX, ebx+4(FP) -- MOVL CX, ecx+8(FP) -- MOVL DX, edx+12(FP) -- RET -diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s -deleted file mode 100644 -index 3c1d60e422125..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s -+++ /dev/null -@@ -1,42 +0,0 @@ --// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. -- --//+build amd64,!gccgo -- --// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) --TEXT ·asmCpuid(SB), 7, $0 -- XORQ CX, CX -- MOVL op+0(FP), AX -- CPUID -- MOVL AX, eax+8(FP) -- MOVL BX, ebx+12(FP) -- MOVL CX, ecx+16(FP) -- MOVL DX, edx+20(FP) -- RET -- --// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) --TEXT ·asmCpuidex(SB), 7, $0 -- MOVL op+0(FP), AX -- MOVL op2+4(FP), CX -- CPUID -- MOVL AX, eax+8(FP) -- MOVL BX, ebx+12(FP) -- MOVL CX, ecx+16(FP) -- MOVL DX, edx+20(FP) -- RET -- --// func asmXgetbv(index uint32) (eax, edx uint32) --TEXT ·asmXgetbv(SB), 7, $0 -- MOVL index+0(FP), CX -- BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV -- MOVL AX, eax+8(FP) -- MOVL DX, edx+12(FP) -- RET -- --// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) --TEXT ·asmRdtscpAsm(SB), 7, $0 -- BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP -- MOVL AX, eax+0(FP) -- MOVL BX, ebx+4(FP) -- MOVL CX, ecx+8(FP) -- MOVL DX, edx+12(FP) -- RET -diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go -deleted file mode 100644 -index a5f04dd6d0a77..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/detect_intel.go -+++ /dev/null -@@ -1,17 +0,0 @@ --// Copyright (c) 2015 Klaus Post, released under MIT License. 
See LICENSE file. -- --// +build 386,!gccgo amd64,!gccgo -- --package cpuid -- --func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) --func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) --func asmXgetbv(index uint32) (eax, edx uint32) --func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -- --func initCPU() { -- cpuid = asmCpuid -- cpuidex = asmCpuidex -- xgetbv = asmXgetbv -- rdtscpAsm = asmRdtscpAsm --} -diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go -deleted file mode 100644 -index 909c5d9a7aed6..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/detect_ref.go -+++ /dev/null -@@ -1,23 +0,0 @@ --// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. -- --// +build !amd64,!386 gccgo -- --package cpuid -- --func initCPU() { -- cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { -- return 0, 0, 0, 0 -- } -- -- cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { -- return 0, 0, 0, 0 -- } -- -- xgetbv = func(index uint32) (eax, edx uint32) { -- return 0, 0 -- } -- -- rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { -- return 0, 0, 0, 0 -- } --} -diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go -deleted file mode 100644 -index 90e7a98d278da..0000000000000 ---- a/vendor/github.com/klauspost/cpuid/generate.go -+++ /dev/null -@@ -1,4 +0,0 @@ --package cpuid -- --//go:generate go run private-gen.go --//go:generate gofmt -w ./private -diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore -new file mode 100644 -index 0000000000000..5e987350471d0 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/.gitignore -@@ -0,0 +1,34 @@ -+# Created by https://www.gitignore.io/api/macos -+ -+### macOS ### -+*.DS_Store -+.AppleDouble -+.LSOverride -+ -+# Icon must end with two \r -+Icon -+ -+ -+# Thumbnails -+._* -+ -+# Files that might appear in the root of a volume -+.DocumentRevisions-V100 -+.fseventsd -+.Spotlight-V100 -+.TemporaryItems -+.Trashes -+.VolumeIcon.icns -+.com.apple.timemachine.donotpresent -+ -+# Directories potentially created on remote AFP share -+.AppleDB -+.AppleDesktop -+Network Trash Folder -+Temporary Items -+.apdisk -+ -+# End of https://www.gitignore.io/api/macos -+ -+cmd/*/*exe -+.idea -\ No newline at end of file -diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml -new file mode 100644 -index 0000000000000..fd6c6db713d3a ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/.travis.yml -@@ -0,0 +1,24 @@ -+language: go -+ -+env: -+ - GO111MODULE=off -+ -+go: -+ - 1.9.x -+ - 1.10.x -+ - 1.11.x -+ - 1.12.x -+ - master -+ -+matrix: -+ fast_finish: true -+ allow_failures: -+ - go: master -+ -+sudo: false -+ -+script: -+ - go test -v -cpu=2 -+ - go test -v -cpu=2 -race -+ - go test -v -cpu=2 -tags noasm -+ - go test -v -cpu=2 -race -tags noasm -diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE -new file mode 100644 -index 0000000000000..bd899d8353dd5 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/LICENSE -@@ -0,0 +1,28 @@ -+Copyright (c) 2015, Pierre Curto -+All rights reserved. -+ -+Redistribution and use in source and binary forms, with or without -+modification, are permitted provided that the following conditions are met: -+ -+* Redistributions of source code must retain the above copyright notice, this -+ list of conditions and the following disclaimer. 
-+ -+* Redistributions in binary form must reproduce the above copyright notice, -+ this list of conditions and the following disclaimer in the documentation -+ and/or other materials provided with the distribution. -+ -+* Neither the name of xxHash nor the names of its -+ contributors may be used to endorse or promote products derived from -+ this software without specific prior written permission. -+ -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS"" -+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md -new file mode 100644 -index 0000000000000..4ee388e81bfb9 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/README.md -@@ -0,0 +1,90 @@ -+# lz4 : LZ4 compression in pure Go -+ -+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) -+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) -+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) -+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) -+ -+## Overview -+ -+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. -+The implementation is based on the reference C [one](https://github.com/lz4/lz4). -+ -+## Install -+ -+Assuming you have the go toolchain installed: -+ -+``` -+go get github.com/pierrec/lz4 -+``` -+ -+There is a command line interface tool to compress and decompress LZ4 files. -+ -+``` -+go install github.com/pierrec/lz4/cmd/lz4c -+``` -+ -+Usage -+ -+``` -+Usage of lz4c: -+ -version -+ print the program version -+ -+Subcommands: -+Compress the given files or from stdin to stdout. -+compress [arguments] [ ...] -+ -bc -+ enable block checksum -+ -l int -+ compression level (0=fastest) -+ -sc -+ disable stream checksum -+ -size string -+ block max size [64K,256K,1M,4M] (default ""4M"") -+ -+Uncompress the given files or from stdin to stdout. -+uncompress [arguments] [ ...] -+ -+``` -+ -+ -+## Example -+ -+``` -+// Compress and uncompress an input string. -+s := ""hello world"" -+r := strings.NewReader(s) -+ -+// The pipe will uncompress the data from the writer. -+pr, pw := io.Pipe() -+zw := lz4.NewWriter(pw) -+zr := lz4.NewReader(pr) -+ -+go func() { -+ // Compress the input string. 
-+ _, _ = io.Copy(zw, r) -+ _ = zw.Close() // Make sure the writer is closed -+ _ = pw.Close() // Terminate the pipe -+}() -+ -+_, _ = io.Copy(os.Stdout, zr) -+ -+// Output: -+// hello world -+``` -+ -+## Contributing -+ -+Contributions are very welcome for bug fixing, performance improvements...! -+ -+- Open an issue with a proper description -+- Send a pull request with appropriate test case(s) -+ -+## Contributors -+ -+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! -+ -+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. -+ -+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. -diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go -new file mode 100644 -index 0000000000000..ee178a992b11a ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/block.go -@@ -0,0 +1,387 @@ -+package lz4 -+ -+import ( -+ ""encoding/binary"" -+ ""fmt"" -+ ""math/bits"" -+) -+ -+// blockHash hashes the lower 6 bytes into a value < htSize. -+func blockHash(x uint64) uint32 { -+ const prime6bytes = 227718039650203 -+ return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) -+} -+ -+// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. -+func CompressBlockBound(n int) int { -+ return n + n/255 + 16 -+} -+ -+// UncompressBlock uncompresses the source buffer into the destination one, -+// and returns the uncompressed size. -+// -+// The destination buffer must be sized appropriately. -+// -+// An error is returned if the source data is invalid or the destination buffer is too small. -+func UncompressBlock(src, dst []byte) (int, error) { -+ if len(src) == 0 { -+ return 0, nil -+ } -+ if di := decodeBlock(dst, src); di >= 0 { -+ return di, nil -+ } -+ return 0, ErrInvalidSourceShortBuffer -+} -+ -+// CompressBlock compresses the source buffer into the destination one. -+// This is the fast version of LZ4 compression and also the default one. -+// The size of hashTable must be at least 64Kb. -+// -+// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. -+// -+// An error is returned if the destination buffer is too small. -+func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { -+ if len(hashTable) < htSize { -+ return 0, fmt.Errorf(""hash table too small, should be at least %d in size"", htSize) -+ } -+ defer recoverBlock(&err) -+ -+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. -+ // This significantly speeds up incompressible data and usually has very small impact on compresssion. -+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) -+ const adaptSkipLog = 7 -+ sn, dn := len(src)-mfLimit, len(dst) -+ if sn <= 0 || dn == 0 { -+ return 0, nil -+ } -+ // Prove to the compiler the table has at least htSize elements. -+ // The compiler can see that ""uint32() >> hashShift"" cannot be out of bounds. -+ hashTable = hashTable[:htSize] -+ -+ // si: Current position of the search. -+ // anchor: Position of the current literals. -+ var si, di, anchor int -+ -+ // Fast scan strategy: the hash table only stores the last 4 bytes sequences. -+ for si < sn { -+ // Hash the next 6 bytes (sequence)... -+ match := binary.LittleEndian.Uint64(src[si:]) -+ h := blockHash(match) -+ h2 := blockHash(match >> 8) -+ -+ // We check a match at s, s+1 and s+2 and pick the first one we get. 
-+ // Checking 3 only requires us to load the source one. -+ ref := hashTable[h] -+ ref2 := hashTable[h2] -+ hashTable[h] = si -+ hashTable[h2] = si + 1 -+ offset := si - ref -+ -+ // If offset <= 0 we got an old entry in the hash table. -+ if offset <= 0 || offset >= winSize || // Out of window. -+ uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. -+ // No match. Start calculating another hash. -+ // The processor can usually do this out-of-order. -+ h = blockHash(match >> 16) -+ ref = hashTable[h] -+ -+ // Check the second match at si+1 -+ si += 1 -+ offset = si - ref2 -+ -+ if offset <= 0 || offset >= winSize || -+ uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { -+ // No match. Check the third match at si+2 -+ si += 1 -+ offset = si - ref -+ hashTable[h] = si -+ -+ if offset <= 0 || offset >= winSize || -+ uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { -+ // Skip one extra byte (at si+3) before we check 3 matches again. -+ si += 2 + (si-anchor)>>adaptSkipLog -+ continue -+ } -+ } -+ } -+ -+ // Match found. -+ lLen := si - anchor // Literal length. -+ // We already matched 4 bytes. -+ mLen := 4 -+ -+ // Extend backwards if we can, reducing literals. -+ tOff := si - offset - 1 -+ for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { -+ si-- -+ tOff-- -+ lLen-- -+ mLen++ -+ } -+ -+ // Add the match length, so we continue search at the end. -+ // Use mLen to store the offset base. -+ si, mLen = si+mLen, si+minMatch -+ -+ // Find the longest match by looking by batches of 8 bytes. -+ for si < sn { -+ x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) -+ if x == 0 { -+ si += 8 -+ } else { -+ // Stop is first non-zero byte. -+ si += bits.TrailingZeros64(x) >> 3 -+ break -+ } -+ } -+ -+ mLen = si - mLen -+ if mLen < 0xF { -+ dst[di] = byte(mLen) -+ } else { -+ dst[di] = 0xF -+ } -+ -+ // Encode literals length. -+ if lLen < 0xF { -+ dst[di] |= byte(lLen << 4) -+ } else { -+ dst[di] |= 0xF0 -+ di++ -+ l := lLen - 0xF -+ for ; l >= 0xFF; l -= 0xFF { -+ dst[di] = 0xFF -+ di++ -+ } -+ dst[di] = byte(l) -+ } -+ di++ -+ -+ // Literals. -+ copy(dst[di:di+lLen], src[anchor:anchor+lLen]) -+ di += lLen + 2 -+ anchor = si -+ -+ // Encode offset. -+ _ = dst[di] // Bound check elimination. -+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) -+ -+ // Encode match length part 2. -+ if mLen >= 0xF { -+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { -+ dst[di] = 0xFF -+ di++ -+ } -+ dst[di] = byte(mLen) -+ di++ -+ } -+ // Check if we can load next values. -+ if si >= sn { -+ break -+ } -+ // Hash match end-2 -+ h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) -+ hashTable[h] = si - 2 -+ } -+ -+ if anchor == 0 { -+ // Incompressible. -+ return 0, nil -+ } -+ -+ // Last literals. -+ lLen := len(src) - anchor -+ if lLen < 0xF { -+ dst[di] = byte(lLen << 4) -+ } else { -+ dst[di] = 0xF0 -+ di++ -+ for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { -+ dst[di] = 0xFF -+ di++ -+ } -+ dst[di] = byte(lLen) -+ } -+ di++ -+ -+ // Write the last literals. -+ if di >= anchor { -+ // Incompressible. -+ return 0, nil -+ } -+ di += copy(dst[di:di+len(src)-anchor], src[anchor:]) -+ return di, nil -+} -+ -+// blockHash hashes 4 bytes into a value < winSize. -+func blockHashHC(x uint32) uint32 { -+ const hasher uint32 = 2654435761 // Knuth multiplicative hash. 
-+ return x * hasher >> (32 - winSizeLog) -+} -+ -+// CompressBlockHC compresses the source buffer src into the destination dst -+// with max search depth (use 0 or negative value for no max). -+// -+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. -+// -+// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. -+// -+// An error is returned if the destination buffer is too small. -+func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { -+ defer recoverBlock(&err) -+ -+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. -+ // This significantly speeds up incompressible data and usually has very small impact on compresssion. -+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) -+ const adaptSkipLog = 7 -+ -+ sn, dn := len(src)-mfLimit, len(dst) -+ if sn <= 0 || dn == 0 { -+ return 0, nil -+ } -+ var si, di int -+ -+ // hashTable: stores the last position found for a given hash -+ // chainTable: stores previous positions for a given hash -+ var hashTable, chainTable [winSize]int -+ -+ if depth <= 0 { -+ depth = winSize -+ } -+ -+ anchor := si -+ for si < sn { -+ // Hash the next 4 bytes (sequence). -+ match := binary.LittleEndian.Uint32(src[si:]) -+ h := blockHashHC(match) -+ -+ // Follow the chain until out of window and give the longest match. -+ mLen := 0 -+ offset := 0 -+ for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { -+ // The first (mLen==0) or next byte (mLen>=minMatch) at current match length -+ // must match to improve on the match length. -+ if src[next+mLen] != src[si+mLen] { -+ continue -+ } -+ ml := 0 -+ // Compare the current position with a previous with the same hash. -+ for ml < sn-si { -+ x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) -+ if x == 0 { -+ ml += 8 -+ } else { -+ // Stop is first non-zero byte. -+ ml += bits.TrailingZeros64(x) >> 3 -+ break -+ } -+ } -+ if ml < minMatch || ml <= mLen { -+ // Match too small (>adaptSkipLog -+ continue -+ } -+ -+ // Match found. -+ // Update hash/chain tables with overlapping bytes: -+ // si already hashed, add everything from si+1 up to the match length. -+ winStart := si + 1 -+ if ws := si + mLen - winSize; ws > winStart { -+ winStart = ws -+ } -+ for si, ml := winStart, si+mLen; si < ml; { -+ match >>= 8 -+ match |= uint32(src[si+3]) << 24 -+ h := blockHashHC(match) -+ chainTable[si&winMask] = hashTable[h] -+ hashTable[h] = si -+ si++ -+ } -+ -+ lLen := si - anchor -+ si += mLen -+ mLen -= minMatch // Match length does not include minMatch. -+ -+ if mLen < 0xF { -+ dst[di] = byte(mLen) -+ } else { -+ dst[di] = 0xF -+ } -+ -+ // Encode literals length. -+ if lLen < 0xF { -+ dst[di] |= byte(lLen << 4) -+ } else { -+ dst[di] |= 0xF0 -+ di++ -+ l := lLen - 0xF -+ for ; l >= 0xFF; l -= 0xFF { -+ dst[di] = 0xFF -+ di++ -+ } -+ dst[di] = byte(l) -+ } -+ di++ -+ -+ // Literals. -+ copy(dst[di:di+lLen], src[anchor:anchor+lLen]) -+ di += lLen -+ anchor = si -+ -+ // Encode offset. -+ di += 2 -+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) -+ -+ // Encode match length part 2. -+ if mLen >= 0xF { -+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { -+ dst[di] = 0xFF -+ di++ -+ } -+ dst[di] = byte(mLen) -+ di++ -+ } -+ } -+ -+ if anchor == 0 { -+ // Incompressible. -+ return 0, nil -+ } -+ -+ // Last literals. 
-+ lLen := len(src) - anchor -+ if lLen < 0xF { -+ dst[di] = byte(lLen << 4) -+ } else { -+ dst[di] = 0xF0 -+ di++ -+ lLen -= 0xF -+ for ; lLen >= 0xFF; lLen -= 0xFF { -+ dst[di] = 0xFF -+ di++ -+ } -+ dst[di] = byte(lLen) -+ } -+ di++ -+ -+ // Write the last literals. -+ if di >= anchor { -+ // Incompressible. -+ return 0, nil -+ } -+ di += copy(dst[di:di+len(src)-anchor], src[anchor:]) -+ return di, nil -+} -diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go -new file mode 100644 -index 0000000000000..bc5e78d40f0a3 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/debug.go -@@ -0,0 +1,23 @@ -+// +build lz4debug -+ -+package lz4 -+ -+import ( -+ ""fmt"" -+ ""os"" -+ ""path/filepath"" -+ ""runtime"" -+) -+ -+const debugFlag = true -+ -+func debug(args ...interface{}) { -+ _, file, line, _ := runtime.Caller(1) -+ file = filepath.Base(file) -+ -+ f := fmt.Sprintf(""LZ4: %s:%d %s"", file, line, args[0]) -+ if f[len(f)-1] != '\n' { -+ f += ""\n"" -+ } -+ fmt.Fprintf(os.Stderr, f, args[1:]...) -+} -diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go -new file mode 100644 -index 0000000000000..44211ad96453b ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/debug_stub.go -@@ -0,0 +1,7 @@ -+// +build !lz4debug -+ -+package lz4 -+ -+const debugFlag = false -+ -+func debug(args ...interface{}) {} -diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go -new file mode 100644 -index 0000000000000..43cc14fbe2e37 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/decode_amd64.go -@@ -0,0 +1,8 @@ -+// +build !appengine -+// +build gc -+// +build !noasm -+ -+package lz4 -+ -+//go:noescape -+func decodeBlock(dst, src []byte) int -diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s -new file mode 100644 -index 0000000000000..20fef39759cb6 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/decode_amd64.s -@@ -0,0 +1,375 @@ -+// +build !appengine -+// +build gc -+// +build !noasm -+ -+#include ""textflag.h"" -+ -+// AX scratch -+// BX scratch -+// CX scratch -+// DX token -+// -+// DI &dst -+// SI &src -+// R8 &dst + len(dst) -+// R9 &src + len(src) -+// R11 &dst -+// R12 short output end -+// R13 short input end -+// func decodeBlock(dst, src []byte) int -+// using 50 bytes of stack currently -+TEXT ·decodeBlock(SB), NOSPLIT, $64-56 -+ MOVQ dst_base+0(FP), DI -+ MOVQ DI, R11 -+ MOVQ dst_len+8(FP), R8 -+ ADDQ DI, R8 -+ -+ MOVQ src_base+24(FP), SI -+ MOVQ src_len+32(FP), R9 -+ ADDQ SI, R9 -+ -+ // shortcut ends -+ // short output end -+ MOVQ R8, R12 -+ SUBQ $32, R12 -+ // short input end -+ MOVQ R9, R13 -+ SUBQ $16, R13 -+ -+loop: -+ // for si < len(src) -+ CMPQ SI, R9 -+ JGE end -+ -+ // token := uint32(src[si]) -+ MOVBQZX (SI), DX -+ INCQ SI -+ -+ // lit_len = token >> 4 -+ // if lit_len > 0 -+ // CX = lit_len -+ MOVQ DX, CX -+ SHRQ $4, CX -+ -+ // if lit_len != 0xF -+ CMPQ CX, $0xF -+ JEQ lit_len_loop_pre -+ CMPQ DI, R12 -+ JGE lit_len_loop_pre -+ CMPQ SI, R13 -+ JGE lit_len_loop_pre -+ -+ // copy shortcut -+ -+ // A two-stage shortcut for the most common case: -+ // 1) If the literal length is 0..14, and there is enough space, -+ // enter the shortcut and copy 16 bytes on behalf of the literals -+ // (in the fast mode, only 8 bytes can be safely copied this way). 
-+ // 2) Further if the match length is 4..18, copy 18 bytes in a similar -+ // manner; but we ensure that there's enough space in the output for -+ // those 18 bytes earlier, upon entering the shortcut (in other words, -+ // there is a combined check for both stages). -+ -+ // copy literal -+ MOVOU (SI), X0 -+ MOVOU X0, (DI) -+ ADDQ CX, DI -+ ADDQ CX, SI -+ -+ MOVQ DX, CX -+ ANDQ $0xF, CX -+ -+ // The second stage: prepare for match copying, decode full info. -+ // If it doesn't work out, the info won't be wasted. -+ // offset := uint16(data[:2]) -+ MOVWQZX (SI), DX -+ ADDQ $2, SI -+ -+ MOVQ DI, AX -+ SUBQ DX, AX -+ CMPQ AX, DI -+ JGT err_short_buf -+ -+ // if we can't do the second stage then jump straight to read the -+ // match length, we already have the offset. -+ CMPQ CX, $0xF -+ JEQ match_len_loop_pre -+ CMPQ DX, $8 -+ JLT match_len_loop_pre -+ CMPQ AX, R11 -+ JLT err_short_buf -+ -+ // memcpy(op + 0, match + 0, 8); -+ MOVQ (AX), BX -+ MOVQ BX, (DI) -+ // memcpy(op + 8, match + 8, 8); -+ MOVQ 8(AX), BX -+ MOVQ BX, 8(DI) -+ // memcpy(op +16, match +16, 2); -+ MOVW 16(AX), BX -+ MOVW BX, 16(DI) -+ -+ ADDQ $4, DI // minmatch -+ ADDQ CX, DI -+ -+ // shortcut complete, load next token -+ JMP loop -+ -+lit_len_loop_pre: -+ // if lit_len > 0 -+ CMPQ CX, $0 -+ JEQ offset -+ CMPQ CX, $0xF -+ JNE copy_literal -+ -+lit_len_loop: -+ // for src[si] == 0xFF -+ CMPB (SI), $0xFF -+ JNE lit_len_finalise -+ -+ // bounds check src[si+1] -+ MOVQ SI, AX -+ ADDQ $1, AX -+ CMPQ AX, R9 -+ JGT err_short_buf -+ -+ // lit_len += 0xFF -+ ADDQ $0xFF, CX -+ INCQ SI -+ JMP lit_len_loop -+ -+lit_len_finalise: -+ // lit_len += int(src[si]) -+ // si++ -+ MOVBQZX (SI), AX -+ ADDQ AX, CX -+ INCQ SI -+ -+copy_literal: -+ // bounds check src and dst -+ MOVQ SI, AX -+ ADDQ CX, AX -+ CMPQ AX, R9 -+ JGT err_short_buf -+ -+ MOVQ DI, AX -+ ADDQ CX, AX -+ CMPQ AX, R8 -+ JGT err_short_buf -+ -+ // whats a good cut off to call memmove? 
-+ CMPQ CX, $16 -+ JGT memmove_lit -+ -+ // if len(dst[di:]) < 16 -+ MOVQ R8, AX -+ SUBQ DI, AX -+ CMPQ AX, $16 -+ JLT memmove_lit -+ -+ // if len(src[si:]) < 16 -+ MOVQ R9, AX -+ SUBQ SI, AX -+ CMPQ AX, $16 -+ JLT memmove_lit -+ -+ MOVOU (SI), X0 -+ MOVOU X0, (DI) -+ -+ JMP finish_lit_copy -+ -+memmove_lit: -+ // memmove(to, from, len) -+ MOVQ DI, 0(SP) -+ MOVQ SI, 8(SP) -+ MOVQ CX, 16(SP) -+ // spill -+ MOVQ DI, 24(SP) -+ MOVQ SI, 32(SP) -+ MOVQ CX, 40(SP) // need len to inc SI, DI after -+ MOVB DX, 48(SP) -+ CALL runtime·memmove(SB) -+ -+ // restore registers -+ MOVQ 24(SP), DI -+ MOVQ 32(SP), SI -+ MOVQ 40(SP), CX -+ MOVB 48(SP), DX -+ -+ // recalc initial values -+ MOVQ dst_base+0(FP), R8 -+ MOVQ R8, R11 -+ ADDQ dst_len+8(FP), R8 -+ MOVQ src_base+24(FP), R9 -+ ADDQ src_len+32(FP), R9 -+ MOVQ R8, R12 -+ SUBQ $32, R12 -+ MOVQ R9, R13 -+ SUBQ $16, R13 -+ -+finish_lit_copy: -+ ADDQ CX, SI -+ ADDQ CX, DI -+ -+ CMPQ SI, R9 -+ JGE end -+ -+offset: -+ // CX := mLen -+ // free up DX to use for offset -+ MOVQ DX, CX -+ -+ MOVQ SI, AX -+ ADDQ $2, AX -+ CMPQ AX, R9 -+ JGT err_short_buf -+ -+ // offset -+ // DX := int(src[si]) | int(src[si+1])<<8 -+ MOVWQZX (SI), DX -+ ADDQ $2, SI -+ -+ // 0 offset is invalid -+ CMPQ DX, $0 -+ JEQ err_corrupt -+ -+ ANDB $0xF, CX -+ -+match_len_loop_pre: -+ // if mlen != 0xF -+ CMPB CX, $0xF -+ JNE copy_match -+ -+match_len_loop: -+ // for src[si] == 0xFF -+ // lit_len += 0xFF -+ CMPB (SI), $0xFF -+ JNE match_len_finalise -+ -+ // bounds check src[si+1] -+ MOVQ SI, AX -+ ADDQ $1, AX -+ CMPQ AX, R9 -+ JGT err_short_buf -+ -+ ADDQ $0xFF, CX -+ INCQ SI -+ JMP match_len_loop -+ -+match_len_finalise: -+ // lit_len += int(src[si]) -+ // si++ -+ MOVBQZX (SI), AX -+ ADDQ AX, CX -+ INCQ SI -+ -+copy_match: -+ // mLen += minMatch -+ ADDQ $4, CX -+ -+ // check we have match_len bytes left in dst -+ // di+match_len < len(dst) -+ MOVQ DI, AX -+ ADDQ CX, AX -+ CMPQ AX, R8 -+ JGT err_short_buf -+ -+ // DX = offset -+ // CX = match_len -+ // BX = &dst + (di - offset) -+ MOVQ DI, BX -+ SUBQ DX, BX -+ -+ // check BX is within dst -+ // if BX < &dst -+ CMPQ BX, R11 -+ JLT err_short_buf -+ -+ // if offset + match_len < di -+ MOVQ BX, AX -+ ADDQ CX, AX -+ CMPQ DI, AX -+ JGT copy_interior_match -+ -+ // AX := len(dst[:di]) -+ // MOVQ DI, AX -+ // SUBQ R11, AX -+ -+ // copy 16 bytes at a time -+ // if di-offset < 16 copy 16-(di-offset) bytes to di -+ // then do the remaining -+ -+copy_match_loop: -+ // for match_len >= 0 -+ // dst[di] = dst[i] -+ // di++ -+ // i++ -+ MOVB (BX), AX -+ MOVB AX, (DI) -+ INCQ DI -+ INCQ BX -+ DECQ CX -+ -+ CMPQ CX, $0 -+ JGT copy_match_loop -+ -+ JMP loop -+ -+copy_interior_match: -+ CMPQ CX, $16 -+ JGT memmove_match -+ -+ // if len(dst[di:]) < 16 -+ MOVQ R8, AX -+ SUBQ DI, AX -+ CMPQ AX, $16 -+ JLT memmove_match -+ -+ MOVOU (BX), X0 -+ MOVOU X0, (DI) -+ -+ ADDQ CX, DI -+ JMP loop -+ -+memmove_match: -+ // memmove(to, from, len) -+ MOVQ DI, 0(SP) -+ MOVQ BX, 8(SP) -+ MOVQ CX, 16(SP) -+ // spill -+ MOVQ DI, 24(SP) -+ MOVQ SI, 32(SP) -+ MOVQ CX, 40(SP) // need len to inc SI, DI after -+ CALL runtime·memmove(SB) -+ -+ // restore registers -+ MOVQ 24(SP), DI -+ MOVQ 32(SP), SI -+ MOVQ 40(SP), CX -+ -+ // recalc initial values -+ MOVQ dst_base+0(FP), R8 -+ MOVQ R8, R11 // TODO: make these sensible numbers -+ ADDQ dst_len+8(FP), R8 -+ MOVQ src_base+24(FP), R9 -+ ADDQ src_len+32(FP), R9 -+ MOVQ R8, R12 -+ SUBQ $32, R12 -+ MOVQ R9, R13 -+ SUBQ $16, R13 -+ -+ ADDQ CX, DI -+ JMP loop -+ -+err_corrupt: -+ MOVQ $-1, ret+48(FP) -+ RET -+ -+err_short_buf: -+ MOVQ $-2, 
ret+48(FP) -+ RET -+ -+end: -+ SUBQ R11, DI -+ MOVQ DI, ret+48(FP) -+ RET -diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go -new file mode 100644 -index 0000000000000..919888edf7dcc ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/decode_other.go -@@ -0,0 +1,98 @@ -+// +build !amd64 appengine !gc noasm -+ -+package lz4 -+ -+func decodeBlock(dst, src []byte) (ret int) { -+ const hasError = -2 -+ defer func() { -+ if recover() != nil { -+ ret = hasError -+ } -+ }() -+ -+ var si, di int -+ for { -+ // Literals and match lengths (token). -+ b := int(src[si]) -+ si++ -+ -+ // Literals. -+ if lLen := b >> 4; lLen > 0 { -+ switch { -+ case lLen < 0xF && si+16 < len(src): -+ // Shortcut 1 -+ // if we have enough room in src and dst, and the literals length -+ // is small enough (0..14) then copy all 16 bytes, even if not all -+ // are part of the literals. -+ copy(dst[di:], src[si:si+16]) -+ si += lLen -+ di += lLen -+ if mLen := b & 0xF; mLen < 0xF { -+ // Shortcut 2 -+ // if the match length (4..18) fits within the literals, then copy -+ // all 18 bytes, even if not all are part of the literals. -+ mLen += 4 -+ if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { -+ i := di - offset -+ end := i + 18 -+ if end > len(dst) { -+ // The remaining buffer may not hold 18 bytes. -+ // See https://github.com/pierrec/lz4/issues/51. -+ end = len(dst) -+ } -+ copy(dst[di:], dst[i:end]) -+ si += 2 -+ di += mLen -+ continue -+ } -+ } -+ case lLen == 0xF: -+ for src[si] == 0xFF { -+ lLen += 0xFF -+ si++ -+ } -+ lLen += int(src[si]) -+ si++ -+ fallthrough -+ default: -+ copy(dst[di:di+lLen], src[si:si+lLen]) -+ si += lLen -+ di += lLen -+ } -+ } -+ if si >= len(src) { -+ return di -+ } -+ -+ offset := int(src[si]) | int(src[si+1])<<8 -+ if offset == 0 { -+ return hasError -+ } -+ si += 2 -+ -+ // Match. -+ mLen := b & 0xF -+ if mLen == 0xF { -+ for src[si] == 0xFF { -+ mLen += 0xFF -+ si++ -+ } -+ mLen += int(src[si]) -+ si++ -+ } -+ mLen += minMatch -+ -+ // Copy the match. -+ expanded := dst[di-offset:] -+ if mLen > offset { -+ // Efficiently copy the match dst[di-offset:di] into the dst slice. -+ bytesToCopy := offset * (mLen / offset) -+ for n := offset; n <= bytesToCopy+offset; n *= 2 { -+ copy(expanded[n:], expanded[:n]) -+ } -+ di += bytesToCopy -+ mLen -= bytesToCopy -+ } -+ di += copy(dst[di:di+mLen], expanded[:mLen]) -+ } -+} -diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go -new file mode 100644 -index 0000000000000..1c45d1813cef4 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/errors.go -@@ -0,0 +1,30 @@ -+package lz4 -+ -+import ( -+ ""errors"" -+ ""fmt"" -+ ""os"" -+ rdebug ""runtime/debug"" -+) -+ -+var ( -+ // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed -+ // block is corrupted or the destination buffer is not large enough for the uncompressed data. -+ ErrInvalidSourceShortBuffer = errors.New(""lz4: invalid source or destination buffer too short"") -+ // ErrInvalid is returned when reading an invalid LZ4 archive. -+ ErrInvalid = errors.New(""lz4: bad magic number"") -+ // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. -+ ErrBlockDependency = errors.New(""lz4: block dependency not supported"") -+ // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. 
-+ ErrUnsupportedSeek = errors.New(""lz4: can only seek forward from io.SeekCurrent"") -+) -+ -+func recoverBlock(e *error) { -+ if r := recover(); r != nil && *e == nil { -+ if debugFlag { -+ fmt.Fprintln(os.Stderr, r) -+ rdebug.PrintStack() -+ } -+ *e = ErrInvalidSourceShortBuffer -+ } -+} -diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go -new file mode 100644 -index 0000000000000..7a76a6bce2b58 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go -@@ -0,0 +1,223 @@ -+// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). -+// (https://github.com/Cyan4973/XXH/) -+package xxh32 -+ -+import ( -+ ""encoding/binary"" -+) -+ -+const ( -+ prime1 uint32 = 2654435761 -+ prime2 uint32 = 2246822519 -+ prime3 uint32 = 3266489917 -+ prime4 uint32 = 668265263 -+ prime5 uint32 = 374761393 -+ -+ primeMask = 0xFFFFFFFF -+ prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 -+ prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 -+) -+ -+// XXHZero represents an xxhash32 object with seed 0. -+type XXHZero struct { -+ v1 uint32 -+ v2 uint32 -+ v3 uint32 -+ v4 uint32 -+ totalLen uint64 -+ buf [16]byte -+ bufused int -+} -+ -+// Sum appends the current hash to b and returns the resulting slice. -+// It does not change the underlying hash state. -+func (xxh XXHZero) Sum(b []byte) []byte { -+ h32 := xxh.Sum32() -+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) -+} -+ -+// Reset resets the Hash to its initial state. -+func (xxh *XXHZero) Reset() { -+ xxh.v1 = prime1plus2 -+ xxh.v2 = prime2 -+ xxh.v3 = 0 -+ xxh.v4 = prime1minus -+ xxh.totalLen = 0 -+ xxh.bufused = 0 -+} -+ -+// Size returns the number of bytes returned by Sum(). -+func (xxh *XXHZero) Size() int { -+ return 4 -+} -+ -+// BlockSize gives the minimum number of bytes accepted by Write(). -+func (xxh *XXHZero) BlockSize() int { -+ return 1 -+} -+ -+// Write adds input bytes to the Hash. -+// It never returns an error. -+func (xxh *XXHZero) Write(input []byte) (int, error) { -+ if xxh.totalLen == 0 { -+ xxh.Reset() -+ } -+ n := len(input) -+ m := xxh.bufused -+ -+ xxh.totalLen += uint64(n) -+ -+ r := len(xxh.buf) - m -+ if n < r { -+ copy(xxh.buf[m:], input) -+ xxh.bufused += len(input) -+ return n, nil -+ } -+ -+ p := 0 -+ // Causes compiler to work directly from registers instead of stack: -+ v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 -+ if m > 0 { -+ // some data left from previous update -+ copy(xxh.buf[xxh.bufused:], input[:r]) -+ xxh.bufused += len(input) - r -+ -+ // fast rotl(13) -+ buf := xxh.buf[:16] // BCE hint. 
-+ v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 -+ v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 -+ v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 -+ v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 -+ p = r -+ xxh.bufused = 0 -+ } -+ -+ for n := n - 16; p <= n; p += 16 { -+ sub := input[p:][:16] //BCE hint for compiler -+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 -+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 -+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 -+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 -+ } -+ xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 -+ -+ copy(xxh.buf[xxh.bufused:], input[p:]) -+ xxh.bufused += len(input) - p -+ -+ return n, nil -+} -+ -+// Sum32 returns the 32 bits Hash value. -+func (xxh *XXHZero) Sum32() uint32 { -+ h32 := uint32(xxh.totalLen) -+ if h32 >= 16 { -+ h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) -+ } else { -+ h32 += prime5 -+ } -+ -+ p := 0 -+ n := xxh.bufused -+ buf := xxh.buf -+ for n := n - 4; p <= n; p += 4 { -+ h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 -+ h32 = rol17(h32) * prime4 -+ } -+ for ; p < n; p++ { -+ h32 += uint32(buf[p]) * prime5 -+ h32 = rol11(h32) * prime1 -+ } -+ -+ h32 ^= h32 >> 15 -+ h32 *= prime2 -+ h32 ^= h32 >> 13 -+ h32 *= prime3 -+ h32 ^= h32 >> 16 -+ -+ return h32 -+} -+ -+// ChecksumZero returns the 32bits Hash value. -+func ChecksumZero(input []byte) uint32 { -+ n := len(input) -+ h32 := uint32(n) -+ -+ if n < 16 { -+ h32 += prime5 -+ } else { -+ v1 := prime1plus2 -+ v2 := prime2 -+ v3 := uint32(0) -+ v4 := prime1minus -+ p := 0 -+ for n := n - 16; p <= n; p += 16 { -+ sub := input[p:][:16] //BCE hint for compiler -+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 -+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 -+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 -+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 -+ } -+ input = input[p:] -+ n -= p -+ h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) -+ } -+ -+ p := 0 -+ for n := n - 4; p <= n; p += 4 { -+ h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 -+ h32 = rol17(h32) * prime4 -+ } -+ for p < n { -+ h32 += uint32(input[p]) * prime5 -+ h32 = rol11(h32) * prime1 -+ p++ -+ } -+ -+ h32 ^= h32 >> 15 -+ h32 *= prime2 -+ h32 ^= h32 >> 13 -+ h32 *= prime3 -+ h32 ^= h32 >> 16 -+ -+ return h32 -+} -+ -+// Uint32Zero hashes x with seed 0. 
-+func Uint32Zero(x uint32) uint32 { -+ h := prime5 + 4 + x*prime3 -+ h = rol17(h) * prime4 -+ h ^= h >> 15 -+ h *= prime2 -+ h ^= h >> 13 -+ h *= prime3 -+ h ^= h >> 16 -+ return h -+} -+ -+func rol1(u uint32) uint32 { -+ return u<<1 | u>>31 -+} -+ -+func rol7(u uint32) uint32 { -+ return u<<7 | u>>25 -+} -+ -+func rol11(u uint32) uint32 { -+ return u<<11 | u>>21 -+} -+ -+func rol12(u uint32) uint32 { -+ return u<<12 | u>>20 -+} -+ -+func rol13(u uint32) uint32 { -+ return u<<13 | u>>19 -+} -+ -+func rol17(u uint32) uint32 { -+ return u<<17 | u>>15 -+} -+ -+func rol18(u uint32) uint32 { -+ return u<<18 | u>>14 -+} -diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go -new file mode 100644 -index 0000000000000..21dcfaeb93d16 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/lz4.go -@@ -0,0 +1,113 @@ -+// Package lz4 implements reading and writing lz4 compressed data (a frame), -+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. -+// -+// Although the block level compression and decompression functions are exposed and are fully compatible -+// with the lz4 block format definition, they are low level and should not be used directly. -+// For a complete description of an lz4 compressed block, see: -+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html -+// -+// See https://github.com/Cyan4973/lz4 for the reference C implementation. -+// -+package lz4 -+ -+import ""math/bits"" -+ -+import ""sync"" -+ -+const ( -+ // Extension is the LZ4 frame file name extension -+ Extension = "".lz4"" -+ // Version is the LZ4 frame format version -+ Version = 1 -+ -+ frameMagic uint32 = 0x184D2204 -+ frameSkipMagic uint32 = 0x184D2A50 -+ -+ // The following constants are used to setup the compression algorithm. -+ minMatch = 4 // the minimum size of the match sequence size (4 bytes) -+ winSizeLog = 16 // LZ4 64Kb window size limit -+ winSize = 1 << winSizeLog -+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks -+ compressedBlockFlag = 1 << 31 -+ compressedBlockMask = compressedBlockFlag - 1 -+ -+ // hashLog determines the size of the hash table used to quickly find a previous match position. -+ // Its value influences the compression speed and memory usage, the lower the faster, -+ // but at the expense of the compression ratio. -+ // 16 seems to be the best compromise for fast compression. -+ hashLog = 16 -+ htSize = 1 << hashLog -+ -+ mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. -+) -+ -+// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. -+const ( -+ blockSize64K = 1 << (16 + 2*iota) -+ blockSize256K -+ blockSize1M -+ blockSize4M -+) -+ -+var ( -+ // Keep a pool of buffers for each valid block sizes. -+ bsMapValue = [...]*sync.Pool{ -+ newBufferPool(2 * blockSize64K), -+ newBufferPool(2 * blockSize256K), -+ newBufferPool(2 * blockSize1M), -+ newBufferPool(2 * blockSize4M), -+ } -+) -+ -+// newBufferPool returns a pool for buffers of the given size. -+func newBufferPool(size int) *sync.Pool { -+ return &sync.Pool{ -+ New: func() interface{} { -+ return make([]byte, size) -+ }, -+ } -+} -+ -+// getBuffer returns a buffer to its pool. -+func getBuffer(size int) []byte { -+ idx := blockSizeValueToIndex(size) - 4 -+ return bsMapValue[idx].Get().([]byte) -+} -+ -+// putBuffer returns a buffer to its pool. 
-+func putBuffer(size int, buf []byte) { -+ if cap(buf) > 0 { -+ idx := blockSizeValueToIndex(size) - 4 -+ bsMapValue[idx].Put(buf[:cap(buf)]) -+ } -+} -+func blockSizeIndexToValue(i byte) int { -+ return 1 << (16 + 2*uint(i)) -+} -+func isValidBlockSize(size int) bool { -+ const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M -+ -+ return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 -+} -+func blockSizeValueToIndex(size int) byte { -+ return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) -+} -+ -+// Header describes the various flags that can be set on a Writer or obtained from a Reader. -+// The default values match those of the LZ4 frame format definition -+// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). -+// -+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -+// It is the caller responsibility to check them if necessary. -+type Header struct { -+ BlockChecksum bool // Compressed blocks checksum flag. -+ NoChecksum bool // Frame checksum flag. -+ BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. -+ Size uint64 // Frame total size. It is _not_ computed by the Writer. -+ CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). -+ done bool // Header processed flag (Read or Write and checked). -+} -+ -+func (h *Header) Reset() { -+ h.done = false -+} -diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go -new file mode 100644 -index 0000000000000..9a0fb00709d56 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go -@@ -0,0 +1,29 @@ -+//+build go1.10 -+ -+package lz4 -+ -+import ( -+ ""fmt"" -+ ""strings"" -+) -+ -+func (h Header) String() string { -+ var s strings.Builder -+ -+ s.WriteString(fmt.Sprintf(""%T{"", h)) -+ if h.BlockChecksum { -+ s.WriteString(""BlockChecksum: true "") -+ } -+ if h.NoChecksum { -+ s.WriteString(""NoChecksum: true "") -+ } -+ if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { -+ s.WriteString(fmt.Sprintf(""BlockMaxSize: %d "", bs)) -+ } -+ if l := h.CompressionLevel; l != 0 { -+ s.WriteString(fmt.Sprintf(""CompressionLevel: %d "", l)) -+ } -+ s.WriteByte('}') -+ -+ return s.String() -+} -diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go -new file mode 100644 -index 0000000000000..12c761a2e7f97 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go -@@ -0,0 +1,29 @@ -+//+build !go1.10 -+ -+package lz4 -+ -+import ( -+ ""bytes"" -+ ""fmt"" -+) -+ -+func (h Header) String() string { -+ var s bytes.Buffer -+ -+ s.WriteString(fmt.Sprintf(""%T{"", h)) -+ if h.BlockChecksum { -+ s.WriteString(""BlockChecksum: true "") -+ } -+ if h.NoChecksum { -+ s.WriteString(""NoChecksum: true "") -+ } -+ if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { -+ s.WriteString(fmt.Sprintf(""BlockMaxSize: %d "", bs)) -+ } -+ if l := h.CompressionLevel; l != 0 { -+ s.WriteString(fmt.Sprintf(""CompressionLevel: %d "", l)) -+ } -+ s.WriteByte('}') -+ -+ return s.String() -+} -diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go -new file mode 100644 -index 0000000000000..87dd72bd0db3e ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/reader.go -@@ -0,0 +1,335 @@ -+package lz4 -+ -+import ( -+ ""encoding/binary"" -+ ""fmt"" -+ ""io"" -+ ""io/ioutil"" -+ -+ ""github.com/pierrec/lz4/internal/xxh32"" -+) -+ 
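A minimal, self-contained sketch of the block-size mapping defined in the vendored lz4.go shown above: the four valid block max sizes (64 KB, 256 KB, 1 MB, 4 MB) are encoded in the LZ4 frame descriptor as indices 4–7, and `blockSizeValueToIndex` / `blockSizeIndexToValue` convert between the two forms. The helpers below simply mirror those definitions so the round trip can be checked in isolation; the `main` wrapper and printed output are illustrative only and are not part of the vendored package.

```go
package main

import (
	"fmt"
	"math/bits"
)

// Mirrors blockSizeValueToIndex from the vendored lz4.go above:
// valid sizes are 64KB, 256KB, 1MB and 4MB, stored in the frame
// descriptor as indices 4..7.
func blockSizeValueToIndex(size int) byte {
	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}

// Mirrors blockSizeIndexToValue: callers such as readHeader pass
// bmsID-4, i.e. an index in the range 0..3.
func blockSizeIndexToValue(i byte) int {
	return 1 << (16 + 2*uint(i))
}

func main() {
	for _, size := range []int{1 << 16, 1 << 18, 1 << 20, 1 << 22} {
		idx := blockSizeValueToIndex(size)
		back := blockSizeIndexToValue(idx - 4)
		// Prints indices 4, 5, 6, 7 and round-trips each to the same size.
		fmt.Printf("size=%-8d index=%d roundtrip=%d\n", size, idx, back)
	}
}
```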
-+// Reader implements the LZ4 frame decoder. -+// The Header is set after the first call to Read(). -+// The Header may change between Read() calls in case of concatenated frames. -+type Reader struct { -+ Header -+ // Handler called when a block has been successfully read. -+ // It provides the number of bytes read. -+ OnBlockDone func(size int) -+ -+ buf [8]byte // Scrap buffer. -+ pos int64 // Current position in src. -+ src io.Reader // Source. -+ zdata []byte // Compressed data. -+ data []byte // Uncompressed data. -+ idx int // Index of unread bytes into data. -+ checksum xxh32.XXHZero // Frame hash. -+ skip int64 // Bytes to skip before next read. -+ dpos int64 // Position in dest -+} -+ -+// NewReader returns a new LZ4 frame decoder. -+// No access to the underlying io.Reader is performed. -+func NewReader(src io.Reader) *Reader { -+ r := &Reader{src: src} -+ return r -+} -+ -+// readHeader checks the frame magic number and parses the frame descriptoz. -+// Skippable frames are supported even as a first frame although the LZ4 -+// specifications recommends skippable frames not to be used as first frames. -+func (z *Reader) readHeader(first bool) error { -+ defer z.checksum.Reset() -+ -+ buf := z.buf[:] -+ for { -+ magic, err := z.readUint32() -+ if err != nil { -+ z.pos += 4 -+ if !first && err == io.ErrUnexpectedEOF { -+ return io.EOF -+ } -+ return err -+ } -+ if magic == frameMagic { -+ break -+ } -+ if magic>>8 != frameSkipMagic>>8 { -+ return ErrInvalid -+ } -+ skipSize, err := z.readUint32() -+ if err != nil { -+ return err -+ } -+ z.pos += 4 -+ m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) -+ if err != nil { -+ return err -+ } -+ z.pos += m -+ } -+ -+ // Header. -+ if _, err := io.ReadFull(z.src, buf[:2]); err != nil { -+ return err -+ } -+ z.pos += 8 -+ -+ b := buf[0] -+ if v := b >> 6; v != Version { -+ return fmt.Errorf(""lz4: invalid version: got %d; expected %d"", v, Version) -+ } -+ if b>>5&1 == 0 { -+ return ErrBlockDependency -+ } -+ z.BlockChecksum = b>>4&1 > 0 -+ frameSize := b>>3&1 > 0 -+ z.NoChecksum = b>>2&1 == 0 -+ -+ bmsID := buf[1] >> 4 & 0x7 -+ if bmsID < 4 || bmsID > 7 { -+ return fmt.Errorf(""lz4: invalid block max size ID: %d"", bmsID) -+ } -+ bSize := blockSizeIndexToValue(bmsID - 4) -+ z.BlockMaxSize = bSize -+ -+ // Allocate the compressed/uncompressed buffers. -+ // The compressed buffer cannot exceed the uncompressed one. -+ if n := 2 * bSize; cap(z.zdata) < n { -+ z.zdata = make([]byte, n, n) -+ } -+ if debugFlag { -+ debug(""header block max size id=%d size=%d"", bmsID, bSize) -+ } -+ z.zdata = z.zdata[:bSize] -+ z.data = z.zdata[:cap(z.zdata)][bSize:] -+ z.idx = len(z.data) -+ -+ _, _ = z.checksum.Write(buf[0:2]) -+ -+ if frameSize { -+ buf := buf[:8] -+ if _, err := io.ReadFull(z.src, buf); err != nil { -+ return err -+ } -+ z.Size = binary.LittleEndian.Uint64(buf) -+ z.pos += 8 -+ _, _ = z.checksum.Write(buf) -+ } -+ -+ // Header checksum. -+ if _, err := io.ReadFull(z.src, buf[:1]); err != nil { -+ return err -+ } -+ z.pos++ -+ if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { -+ return fmt.Errorf(""lz4: invalid header checksum: got %x; expected %x"", buf[0], h) -+ } -+ -+ z.Header.done = true -+ if debugFlag { -+ debug(""header read: %v"", z.Header) -+ } -+ -+ return nil -+} -+ -+// Read decompresses data from the underlying source into the supplied buffer. -+// -+// Since there can be multiple streams concatenated, Header values may -+// change between calls to Read(). 
If that is the case, no data is actually read from -+// the underlying io.Reader, to allow for potential input buffer resizing. -+func (z *Reader) Read(buf []byte) (int, error) { -+ if debugFlag { -+ debug(""Read buf len=%d"", len(buf)) -+ } -+ if !z.Header.done { -+ if err := z.readHeader(true); err != nil { -+ return 0, err -+ } -+ if debugFlag { -+ debug(""header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d"", -+ len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) -+ } -+ } -+ -+ if len(buf) == 0 { -+ return 0, nil -+ } -+ -+ if z.idx == len(z.data) { -+ // No data ready for reading, process the next block. -+ if debugFlag { -+ debug(""reading block from writer"") -+ } -+ // Reset uncompressed buffer -+ z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] -+ -+ // Block length: 0 = end of frame, highest bit set: uncompressed. -+ bLen, err := z.readUint32() -+ if err != nil { -+ return 0, err -+ } -+ z.pos += 4 -+ -+ if bLen == 0 { -+ // End of frame reached. -+ if !z.NoChecksum { -+ // Validate the frame checksum. -+ checksum, err := z.readUint32() -+ if err != nil { -+ return 0, err -+ } -+ if debugFlag { -+ debug(""frame checksum got=%x / want=%x"", z.checksum.Sum32(), checksum) -+ } -+ z.pos += 4 -+ if h := z.checksum.Sum32(); checksum != h { -+ return 0, fmt.Errorf(""lz4: invalid frame checksum: got %x; expected %x"", h, checksum) -+ } -+ } -+ -+ // Get ready for the next concatenated frame and keep the position. -+ pos := z.pos -+ z.Reset(z.src) -+ z.pos = pos -+ -+ // Since multiple frames can be concatenated, check for more. -+ return 0, z.readHeader(false) -+ } -+ -+ if debugFlag { -+ debug(""raw block size %d"", bLen) -+ } -+ if bLen&compressedBlockFlag > 0 { -+ // Uncompressed block. -+ bLen &= compressedBlockMask -+ if debugFlag { -+ debug(""uncompressed block size %d"", bLen) -+ } -+ if int(bLen) > cap(z.data) { -+ return 0, fmt.Errorf(""lz4: invalid block size: %d"", bLen) -+ } -+ z.data = z.data[:bLen] -+ if _, err := io.ReadFull(z.src, z.data); err != nil { -+ return 0, err -+ } -+ z.pos += int64(bLen) -+ if z.OnBlockDone != nil { -+ z.OnBlockDone(int(bLen)) -+ } -+ -+ if z.BlockChecksum { -+ checksum, err := z.readUint32() -+ if err != nil { -+ return 0, err -+ } -+ z.pos += 4 -+ -+ if h := xxh32.ChecksumZero(z.data); h != checksum { -+ return 0, fmt.Errorf(""lz4: invalid block checksum: got %x; expected %x"", h, checksum) -+ } -+ } -+ -+ } else { -+ // Compressed block. 
-+ if debugFlag { -+ debug(""compressed block size %d"", bLen) -+ } -+ if int(bLen) > cap(z.data) { -+ return 0, fmt.Errorf(""lz4: invalid block size: %d"", bLen) -+ } -+ zdata := z.zdata[:bLen] -+ if _, err := io.ReadFull(z.src, zdata); err != nil { -+ return 0, err -+ } -+ z.pos += int64(bLen) -+ -+ if z.BlockChecksum { -+ checksum, err := z.readUint32() -+ if err != nil { -+ return 0, err -+ } -+ z.pos += 4 -+ -+ if h := xxh32.ChecksumZero(zdata); h != checksum { -+ return 0, fmt.Errorf(""lz4: invalid block checksum: got %x; expected %x"", h, checksum) -+ } -+ } -+ -+ n, err := UncompressBlock(zdata, z.data) -+ if err != nil { -+ return 0, err -+ } -+ z.data = z.data[:n] -+ if z.OnBlockDone != nil { -+ z.OnBlockDone(n) -+ } -+ } -+ -+ if !z.NoChecksum { -+ _, _ = z.checksum.Write(z.data) -+ if debugFlag { -+ debug(""current frame checksum %x"", z.checksum.Sum32()) -+ } -+ } -+ z.idx = 0 -+ } -+ -+ if z.skip > int64(len(z.data[z.idx:])) { -+ z.skip -= int64(len(z.data[z.idx:])) -+ z.dpos += int64(len(z.data[z.idx:])) -+ z.idx = len(z.data) -+ return 0, nil -+ } -+ -+ z.idx += int(z.skip) -+ z.dpos += z.skip -+ z.skip = 0 -+ -+ n := copy(buf, z.data[z.idx:]) -+ z.idx += n -+ z.dpos += int64(n) -+ if debugFlag { -+ debug(""copied %d bytes to input"", n) -+ } -+ -+ return n, nil -+} -+ -+// Seek implements io.Seeker, but supports seeking forward from the current -+// position only. Any other seek will return an error. Allows skipping output -+// bytes which aren't needed, which in some scenarios is faster than reading -+// and discarding them. -+// Note this may cause future calls to Read() to read 0 bytes if all of the -+// data they would have returned is skipped. -+func (z *Reader) Seek(offset int64, whence int) (int64, error) { -+ if offset < 0 || whence != io.SeekCurrent { -+ return z.dpos + z.skip, ErrUnsupportedSeek -+ } -+ z.skip += offset -+ return z.dpos + z.skip, nil -+} -+ -+// Reset discards the Reader's state and makes it equivalent to the -+// result of its original state from NewReader, but reading from r instead. -+// This permits reusing a Reader rather than allocating a new one. -+func (z *Reader) Reset(r io.Reader) { -+ z.Header = Header{} -+ z.pos = 0 -+ z.src = r -+ z.zdata = z.zdata[:0] -+ z.data = z.data[:0] -+ z.idx = 0 -+ z.checksum.Reset() -+} -+ -+// readUint32 reads an uint32 into the supplied buffer. -+// The idea is to make use of the already allocated buffers avoiding additional allocations. -+func (z *Reader) readUint32() (uint32, error) { -+ buf := z.buf[:4] -+ _, err := io.ReadFull(z.src, buf) -+ x := binary.LittleEndian.Uint32(buf) -+ return x, err -+} -diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go -new file mode 100644 -index 0000000000000..324f1386b8ad8 ---- /dev/null -+++ b/vendor/github.com/pierrec/lz4/writer.go -@@ -0,0 +1,408 @@ -+package lz4 -+ -+import ( -+ ""encoding/binary"" -+ ""fmt"" -+ ""github.com/pierrec/lz4/internal/xxh32"" -+ ""io"" -+ ""runtime"" -+) -+ -+// zResult contains the results of compressing a block. -+type zResult struct { -+ size uint32 // Block header -+ data []byte // Compressed data -+ checksum uint32 // Data checksum -+} -+ -+// Writer implements the LZ4 frame encoder. -+type Writer struct { -+ Header -+ // Handler called when a block has been successfully written out. -+ // It provides the number of bytes written. 
-+ OnBlockDone func(size int) -+ -+ buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes -+ dst io.Writer // Destination. -+ checksum xxh32.XXHZero // Frame checksum. -+ data []byte // Data to be compressed + buffer for compressed data. -+ idx int // Index into data. -+ hashtable [winSize]int // Hash table used in CompressBlock(). -+ -+ // For concurrency. -+ c chan chan zResult // Channel for block compression goroutines and writer goroutine. -+ err error // Any error encountered while writing to the underlying destination. -+} -+ -+// NewWriter returns a new LZ4 frame encoder. -+// No access to the underlying io.Writer is performed. -+// The supplied Header is checked at the first Write. -+// It is ok to change it before the first Write but then not until a Reset() is performed. -+func NewWriter(dst io.Writer) *Writer { -+ z := new(Writer) -+ z.Reset(dst) -+ return z -+} -+ -+// WithConcurrency sets the number of concurrent go routines used for compression. -+// A negative value sets the concurrency to GOMAXPROCS. -+func (z *Writer) WithConcurrency(n int) *Writer { -+ switch { -+ case n == 0 || n == 1: -+ z.c = nil -+ return z -+ case n < 0: -+ n = runtime.GOMAXPROCS(0) -+ } -+ z.c = make(chan chan zResult, n) -+ // Writer goroutine managing concurrent block compression goroutines. -+ go func() { -+ // Process next block compression item. -+ for c := range z.c { -+ // Read the next compressed block result. -+ // Waiting here ensures that the blocks are output in the order they were sent. -+ // The incoming channel is always closed as it indicates to the caller that -+ // the block has been processed. -+ res := <-c -+ n := len(res.data) -+ if n == 0 { -+ // Notify the block compression routine that we are done with its result. -+ // This is used when a sentinel block is sent to terminate the compression. -+ close(c) -+ return -+ } -+ // Write the block. -+ if err := z.writeUint32(res.size); err != nil && z.err == nil { -+ z.err = err -+ } -+ if _, err := z.dst.Write(res.data); err != nil && z.err == nil { -+ z.err = err -+ } -+ if z.BlockChecksum { -+ if err := z.writeUint32(res.checksum); err != nil && z.err == nil { -+ z.err = err -+ } -+ } -+ if isCompressed := res.size&compressedBlockFlag == 0; isCompressed { -+ // It is now safe to release the buffer as no longer in use by any goroutine. -+ putBuffer(cap(res.data), res.data) -+ } -+ if h := z.OnBlockDone; h != nil { -+ h(n) -+ } -+ close(c) -+ } -+ }() -+ return z -+} -+ -+// newBuffers instantiates new buffers which size matches the one in Header. -+// The returned buffers are for decompression and compression respectively. -+func (z *Writer) newBuffers() { -+ bSize := z.Header.BlockMaxSize -+ buf := getBuffer(bSize) -+ z.data = buf[:bSize] // Uncompressed buffer is the first half. -+} -+ -+// freeBuffers puts the writer's buffers back to the pool. -+func (z *Writer) freeBuffers() { -+ // Put the buffer back into the pool, if any. -+ putBuffer(z.Header.BlockMaxSize, z.data) -+ z.data = nil -+} -+ -+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. -+func (z *Writer) writeHeader() error { -+ // Default to 4Mb if BlockMaxSize is not set. -+ if z.Header.BlockMaxSize == 0 { -+ z.Header.BlockMaxSize = blockSize4M -+ } -+ // The only option that needs to be validated. 
-+ bSize := z.Header.BlockMaxSize -+ if !isValidBlockSize(z.Header.BlockMaxSize) { -+ return fmt.Errorf(""lz4: invalid block max size: %d"", bSize) -+ } -+ // Allocate the compressed/uncompressed buffers. -+ // The compressed buffer cannot exceed the uncompressed one. -+ z.newBuffers() -+ z.idx = 0 -+ -+ // Size is optional. -+ buf := z.buf[:] -+ -+ // Set the fixed size data: magic number, block max size and flags. -+ binary.LittleEndian.PutUint32(buf[0:], frameMagic) -+ flg := byte(Version << 6) -+ flg |= 1 << 5 // No block dependency. -+ if z.Header.BlockChecksum { -+ flg |= 1 << 4 -+ } -+ if z.Header.Size > 0 { -+ flg |= 1 << 3 -+ } -+ if !z.Header.NoChecksum { -+ flg |= 1 << 2 -+ } -+ buf[4] = flg -+ buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 -+ -+ // Current buffer size: magic(4) + flags(1) + block max size (1). -+ n := 6 -+ // Optional items. -+ if z.Header.Size > 0 { -+ binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) -+ n += 8 -+ } -+ -+ // The header checksum includes the flags, block max size and optional Size. -+ buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) -+ z.checksum.Reset() -+ -+ // Header ready, write it out. -+ if _, err := z.dst.Write(buf[0 : n+1]); err != nil { -+ return err -+ } -+ z.Header.done = true -+ if debugFlag { -+ debug(""wrote header %v"", z.Header) -+ } -+ -+ return nil -+} -+ -+// Write compresses data from the supplied buffer into the underlying io.Writer. -+// Write does not return until the data has been written. -+func (z *Writer) Write(buf []byte) (int, error) { -+ if !z.Header.done { -+ if err := z.writeHeader(); err != nil { -+ return 0, err -+ } -+ } -+ if debugFlag { -+ debug(""input buffer len=%d index=%d"", len(buf), z.idx) -+ } -+ -+ zn := len(z.data) -+ var n int -+ for len(buf) > 0 { -+ if z.idx == 0 && len(buf) >= zn { -+ // Avoid a copy as there is enough data for a block. -+ if err := z.compressBlock(buf[:zn]); err != nil { -+ return n, err -+ } -+ n += zn -+ buf = buf[zn:] -+ continue -+ } -+ // Accumulate the data to be compressed. -+ m := copy(z.data[z.idx:], buf) -+ n += m -+ z.idx += m -+ buf = buf[m:] -+ if debugFlag { -+ debug(""%d bytes copied to buf, current index %d"", n, z.idx) -+ } -+ -+ if z.idx < len(z.data) { -+ // Buffer not filled. -+ if debugFlag { -+ debug(""need more data for compression"") -+ } -+ return n, nil -+ } -+ -+ // Buffer full. -+ if err := z.compressBlock(z.data); err != nil { -+ return n, err -+ } -+ z.idx = 0 -+ } -+ -+ return n, nil -+} -+ -+// compressBlock compresses a block. -+func (z *Writer) compressBlock(data []byte) error { -+ if !z.NoChecksum { -+ _, _ = z.checksum.Write(data) -+ } -+ -+ if z.c != nil { -+ c := make(chan zResult) -+ z.c <- c // Send now to guarantee order -+ go writerCompressBlock(c, z.Header, data) -+ return nil -+ } -+ -+ zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] -+ // The compressed block size cannot exceed the input's. -+ var zn int -+ -+ if level := z.Header.CompressionLevel; level != 0 { -+ zn, _ = CompressBlockHC(data, zdata, level) -+ } else { -+ zn, _ = CompressBlock(data, zdata, z.hashtable[:]) -+ } -+ -+ var bLen uint32 -+ if debugFlag { -+ debug(""block compression %d => %d"", len(data), zn) -+ } -+ if zn > 0 && zn < len(data) { -+ // Compressible and compressed size smaller than uncompressed: ok! -+ bLen = uint32(zn) -+ zdata = zdata[:zn] -+ } else { -+ // Uncompressed block. 
-+ bLen = uint32(len(data)) | compressedBlockFlag -+ zdata = data -+ } -+ if debugFlag { -+ debug(""block compression to be written len=%d data len=%d"", bLen, len(zdata)) -+ } -+ -+ // Write the block. -+ if err := z.writeUint32(bLen); err != nil { -+ return err -+ } -+ written, err := z.dst.Write(zdata) -+ if err != nil { -+ return err -+ } -+ if h := z.OnBlockDone; h != nil { -+ h(written) -+ } -+ -+ if !z.BlockChecksum { -+ if debugFlag { -+ debug(""current frame checksum %x"", z.checksum.Sum32()) -+ } -+ return nil -+ } -+ checksum := xxh32.ChecksumZero(zdata) -+ if debugFlag { -+ debug(""block checksum %x"", checksum) -+ defer func() { debug(""current frame checksum %x"", z.checksum.Sum32()) }() -+ } -+ return z.writeUint32(checksum) -+} -+ -+// Flush flushes any pending compressed data to the underlying writer. -+// Flush does not return until the data has been written. -+// If the underlying writer returns an error, Flush returns that error. -+func (z *Writer) Flush() error { -+ if debugFlag { -+ debug(""flush with index %d"", z.idx) -+ } -+ if z.idx == 0 { -+ return nil -+ } -+ -+ data := z.data[:z.idx] -+ z.idx = 0 -+ if z.c == nil { -+ return z.compressBlock(data) -+ } -+ if !z.NoChecksum { -+ _, _ = z.checksum.Write(data) -+ } -+ c := make(chan zResult) -+ z.c <- c -+ writerCompressBlock(c, z.Header, data) -+ return nil -+} -+ -+func (z *Writer) close() error { -+ if z.c == nil { -+ return nil -+ } -+ // Send a sentinel block (no data to compress) to terminate the writer main goroutine. -+ c := make(chan zResult) -+ z.c <- c -+ c <- zResult{} -+ // Wait for the main goroutine to complete. -+ <-c -+ // At this point the main goroutine has shut down or is about to return. -+ z.c = nil -+ return z.err -+} -+ -+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. -+func (z *Writer) Close() error { -+ if !z.Header.done { -+ if err := z.writeHeader(); err != nil { -+ return err -+ } -+ } -+ if err := z.Flush(); err != nil { -+ return err -+ } -+ if err := z.close(); err != nil { -+ return err -+ } -+ z.freeBuffers() -+ -+ if debugFlag { -+ debug(""writing last empty block"") -+ } -+ if err := z.writeUint32(0); err != nil { -+ return err -+ } -+ if z.NoChecksum { -+ return nil -+ } -+ checksum := z.checksum.Sum32() -+ if debugFlag { -+ debug(""stream checksum %x"", checksum) -+ } -+ return z.writeUint32(checksum) -+} -+ -+// Reset clears the state of the Writer z such that it is equivalent to its -+// initial state from NewWriter, but instead writing to w. -+// No access to the underlying io.Writer is performed. -+func (z *Writer) Reset(w io.Writer) { -+ n := cap(z.c) -+ _ = z.close() -+ z.freeBuffers() -+ z.Header.Reset() -+ z.dst = w -+ z.checksum.Reset() -+ z.idx = 0 -+ z.err = nil -+ z.WithConcurrency(n) -+} -+ -+// writeUint32 writes a uint32 to the underlying writer. -+func (z *Writer) writeUint32(x uint32) error { -+ buf := z.buf[:4] -+ binary.LittleEndian.PutUint32(buf, x) -+ _, err := z.dst.Write(buf) -+ return err -+} -+ -+// writerCompressBlock compresses data into a pooled buffer and writes its result -+// out to the input channel. -+func writerCompressBlock(c chan zResult, header Header, data []byte) { -+ zdata := getBuffer(header.BlockMaxSize) -+ // The compressed block size cannot exceed the input's. 
-+ var zn int -+ if level := header.CompressionLevel; level != 0 { -+ zn, _ = CompressBlockHC(data, zdata, level) -+ } else { -+ var hashTable [winSize]int -+ zn, _ = CompressBlock(data, zdata, hashTable[:]) -+ } -+ var res zResult -+ if zn > 0 && zn < len(data) { -+ res.size = uint32(zn) -+ res.data = zdata[:zn] -+ } else { -+ res.size = uint32(len(data)) | compressedBlockFlag -+ res.data = data -+ } -+ if header.BlockChecksum { -+ res.checksum = xxh32.ChecksumZero(res.data) -+ } -+ c <- res -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 30f1902a8266d..6d085fed8aa85 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -340,11 +340,9 @@ github.com/jonboulle/clockwork - github.com/jpillora/backoff - # github.com/json-iterator/go v1.1.7 - github.com/json-iterator/go --# github.com/klauspost/compress v1.7.4 -+# github.com/klauspost/compress v1.9.4 - github.com/klauspost/compress/flate - github.com/klauspost/compress/gzip --# github.com/klauspost/cpuid v1.2.1 --github.com/klauspost/cpuid - # github.com/konsorten/go-windows-terminal-sequences v1.0.2 - github.com/konsorten/go-windows-terminal-sequences - # github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 -@@ -387,6 +385,9 @@ github.com/opentracing-contrib/go-stdlib/nethttp - github.com/opentracing/opentracing-go - github.com/opentracing/opentracing-go/ext - github.com/opentracing/opentracing-go/log -+# github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible -+github.com/pierrec/lz4 -+github.com/pierrec/lz4/internal/xxh32 - # github.com/pkg/errors v0.8.1 - github.com/pkg/errors - # github.com/pmezard/go-difflib v1.0.0",unknown,"Adds configurable compression algorithms for chunks (#1411) - -* Adds L4Z encoding. - -Signed-off-by: Cyril Tovena - -* Adds encoding benchmarks - -Signed-off-by: Cyril Tovena - -* Adds snappy encoding. - -Signed-off-by: Cyril Tovena - -* Adds chunk size test - -Signed-off-by: Cyril Tovena - -* Adds snappy v2 - -Signed-off-by: Cyril Tovena - -* Improve benchmarks - -Signed-off-by: Cyril Tovena - -* Remove chunkenc - -Signed-off-by: Cyril Tovena - -* Update lz4 to latest master version. - -Signed-off-by: Peter Štibraný - -* Use temporary buffer in serialise method to avoid allocations when doing string -> byte conversion. -It also makes code little more readable. We pool those buffers for reuse. - -Signed-off-by: Peter Štibraný - -* Added gzip -1 for comparison. - -Signed-off-by: Peter Štibraný - -* Initialize reader and buffered reader lazily. - -This helps with reader/buffered reader reuse. - -Signed-off-by: Peter Štibraný - -* Don't keep entries, extracted generateData function - -(mostly to get more understandable profile) - -Signed-off-by: Peter Štibraný - -* Improve test and benchmark to cover all encodings. - -Signed-off-by: Cyril Tovena - -* Adds support for a new chunk format with encoding info. - -Signed-off-by: Cyril Tovena - -* Ingesters now support encoding config. - -Signed-off-by: Cyril Tovena - -* Add support for no compression. - -Signed-off-by: Cyril Tovena - -* Add docs - -Signed-off-by: Cyril Tovena - -* Remove default Gzip for ByteChunk. 
- -Signed-off-by: Cyril Tovena - -* Removes none, snappyv2 and gzip-1 - -Signed-off-by: Cyril Tovena - -* Move log test lines to testdata and add supported encoding stringer - -Signed-off-by: Cyril Tovena - -* got linted - -Signed-off-by: Cyril Tovena " -76e2a1402977069d6eca5eb525901a88dc577743,2021-10-29 17:29:48,lingpeng0314,"add group_{left,right} to LogQL (#4510) - -* Add group_left/group_right support - -* Update docs/sources/logql/_index.md - -* Minor change for a test case - -* removes CardManyToMany - -* removes now-unused IsSetOperator helper - -Co-authored-by: Owen Diehl ",False,"diff --git a/clients/pkg/promtail/server/ui/assets_vfsdata.go b/clients/pkg/promtail/server/ui/assets_vfsdata.go -index 52741f98a0136..d8d6a57e6d5fc 100644 ---- a/clients/pkg/promtail/server/ui/assets_vfsdata.go -+++ b/clients/pkg/promtail/server/ui/assets_vfsdata.go -@@ -1,5 +1,6 @@ - // Code generated by vfsgen; DO NOT EDIT. - -+//go:build !dev - // +build !dev - - package ui -diff --git a/docs/sources/logql/_index.md b/docs/sources/logql/_index.md -index 225adfe7cc7d5..b3e996eb09085 100644 ---- a/docs/sources/logql/_index.md -+++ b/docs/sources/logql/_index.md -@@ -163,6 +163,23 @@ This example will return every machine total count within the last minutes ratio - sum by(machine) (count_over_time({app=""foo""}[1m])) / on() sum(count_over_time({app=""foo""}[1m])) - ``` - -+### Many-to-one and one-to-many vector matches -+Many-to-one and one-to-many matchings occur when each vector element on the ""one""-side can match with multiple elements on the ""many""-side. You must explicitly request matching by using the group_left or group_right modifier, where left or right determines which vector has the higher cardinality. -+The syntax: -+```logql -+ ignoring() group_left() -+ ignoring() group_right() -+ on() group_left() -+ on() group_right() -+``` -+The label list provided with the group modifier contains additional labels from the ""one""-side that are included in the result metrics. And a label should only appear in one of the lists specified by `on` and `group_x`. Every time series of the result vector must be uniquely identifiable. -+Grouping modifiers can only be used for comparison and arithmetic. By default, the system matches `and`, `unless`, and `or` operations with all entries in the right vector. -+ -+The following example returns sum results for the same application with the many part labels and the labels specified by the group_right operation. -+```logql -+sum by (app,pool) (count_over_time({foo=""bar""}[1m])) + on (app) group_right (pool) sum by (app,machine) (count_over_time({foo=""bar""}[1m])) -+``` -+ - ## Comments - - LogQL queries can be commented using the `#` character: -diff --git a/pkg/logql/ast.go b/pkg/logql/ast.go -index 57f9be5f69c6c..304dff162b643 100644 ---- a/pkg/logql/ast.go -+++ b/pkg/logql/ast.go -@@ -717,6 +717,9 @@ const ( - OpOn = ""on"" - OpIgnoring = ""ignoring"" - -+ OpGroupLeft = ""group_left"" -+ OpGroupRight = ""group_right"" -+ - // conversion Op - OpConvBytes = ""bytes"" - OpConvDuration = ""duration"" -@@ -964,8 +967,41 @@ func (e *VectorAggregationExpr) Walk(f WalkFn) { - e.Left.Walk(f) - } - -+// VectorMatchCardinality describes the cardinality relationship -+// of two Vectors in a binary operation. 
-+type VectorMatchCardinality int -+ -+const ( -+ CardOneToOne VectorMatchCardinality = iota -+ CardManyToOne -+ CardOneToMany -+) -+ -+func (vmc VectorMatchCardinality) String() string { -+ switch vmc { -+ case CardOneToOne: -+ return ""one-to-one"" -+ case CardManyToOne: -+ return ""many-to-one"" -+ case CardOneToMany: -+ return ""one-to-many"" -+ } -+ panic(""promql.VectorMatchCardinality.String: unknown match cardinality"") -+} -+ -+// VectorMatching describes how elements from two Vectors in a binary -+// operation are supposed to be matched. - type VectorMatching struct { -- On bool -+ // The cardinality of the two Vectors. -+ Card VectorMatchCardinality -+ // MatchingLabels contains the labels which define equality of a pair of -+ // elements from the Vectors. -+ MatchingLabels []string -+ // On includes the given label names from matching, -+ // rather than excluding them. -+ On bool -+ // Include contains additional labels that should be included in -+ // the result from the side with the lower cardinality. - Include []string - } - -@@ -988,10 +1024,22 @@ func (e *BinOpExpr) String() string { - op = fmt.Sprintf(""%s bool"", op) - } - if e.Opts.VectorMatching != nil { -- if e.Opts.VectorMatching.On { -- op = fmt.Sprintf(""%s %s (%s)"", op, OpOn, strings.Join(e.Opts.VectorMatching.Include, "","")) -- } else { -- op = fmt.Sprintf(""%s %s (%s)"", op, OpIgnoring, strings.Join(e.Opts.VectorMatching.Include, "","")) -+ group := """" -+ if e.Opts.VectorMatching.Card == CardManyToOne { -+ group = OpGroupLeft -+ } else if e.Opts.VectorMatching.Card == CardOneToMany { -+ group = OpGroupRight -+ } -+ if e.Opts.VectorMatching.Include != nil { -+ group = fmt.Sprintf(""%s (%s)"", group, strings.Join(e.Opts.VectorMatching.Include, "","")) -+ } -+ -+ if e.Opts.VectorMatching.On || e.Opts.VectorMatching.MatchingLabels != nil { -+ on := OpOn -+ if !e.Opts.VectorMatching.On { -+ on = OpIgnoring -+ } -+ op = fmt.Sprintf(""%s %s (%s) %s"", op, on, strings.Join(e.Opts.VectorMatching.MatchingLabels, "",""), group) - } - } - } -diff --git a/pkg/logql/ast_test.go b/pkg/logql/ast_test.go -index 8bf47df02c233..b8c08743640be 100644 ---- a/pkg/logql/ast_test.go -+++ b/pkg/logql/ast_test.go -@@ -318,7 +318,6 @@ func TestStringer(t *testing.T) { - out: `(0 > bool count_over_time({foo=""bar""}[1m]))`, - }, - { -- - in: `0 > count_over_time({foo=""bar""}[1m])`, - out: `(0 > count_over_time({foo=""bar""}[1m]))`, - }, -diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go -index bd631885e4d0b..7a9f34a442f1b 100644 ---- a/pkg/logql/engine_test.go -+++ b/pkg/logql/engine_test.go -@@ -537,9 +537,7 @@ func TestEngine_LogsInstantQuery(t *testing.T) { - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app=""foo""}[1m])`}}, - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `count_over_time({app=""bar""}[1m])`}}, - }, -- promql.Vector{ -- promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}}}, -- }, -+ promql.Vector{}, - }, - { - `sum without(app) (count_over_time({app=""foo""}[1m])) > bool sum without(app) (count_over_time({app=""bar""}[1m]))`, -@@ -554,9 +552,7 @@ func TestEngine_LogsInstantQuery(t *testing.T) { - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum without (app) (count_over_time({app=""foo""}[1m]))`}}, - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum 
without (app) (count_over_time({app=""bar""}[1m]))`}}, - }, -- promql.Vector{ -- promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{}}, -- }, -+ promql.Vector{}, - }, - { - `sum without(app) (count_over_time({app=""foo""}[1m])) >= sum without(app) (count_over_time({app=""bar""}[1m]))`, -@@ -649,7 +645,24 @@ func TestEngine_LogsInstantQuery(t *testing.T) { - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app) (count_over_time({app=""foo""}[1m]))`}}, - }, - promql.Vector{ -- promql.Sample{Point: promql.Point{T: 60 * 1000, V: 120}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""fuzz""}}}, -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 120}, Metric: labels.Labels{}}, -+ }, -+ }, -+ { -+ `sum by (app,machine) (count_over_time({app=""foo""}[1m])) + on (app) sum by (app) (count_over_time({app=""foo""}[1m]))`, -+ time.Unix(60, 0), -+ logproto.FORWARD, -+ 0, -+ [][]logproto.Series{ -+ {newSeries(testSize, identity, `{app=""foo"",machine=""fuzz""}`)}, -+ {newSeries(testSize, identity, `{app=""foo""}`)}, -+ }, -+ []SelectSampleParams{ -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,machine) (count_over_time({app=""foo""}[1m]))`}}, -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app) (count_over_time({app=""foo""}[1m]))`}}, -+ }, -+ promql.Vector{ -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 120}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}}}, - }, - }, - { -@@ -666,7 +679,7 @@ func TestEngine_LogsInstantQuery(t *testing.T) { - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app) (count_over_time({app=""foo""}[1m]))`}}, - }, - promql.Vector{ -- promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""fuzz""}}}, -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}}}, - }, - }, - { -@@ -682,7 +695,79 @@ func TestEngine_LogsInstantQuery(t *testing.T) { - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,machine) (count_over_time({app=""foo""}[1m]))`}}, - {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app) (count_over_time({app=""foo""}[1m]))`}}, - }, -- errors.New(""multiple matches for labels""), -+ errors.New(""multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)""), -+ }, -+ { -+ `sum by (app,machine) (count_over_time({app=""foo""}[1m])) > bool on () group_left sum by (app) (count_over_time({app=""foo""}[1m]))`, -+ time.Unix(60, 0), -+ logproto.FORWARD, -+ 0, -+ [][]logproto.Series{ -+ {newSeries(testSize, identity, `{app=""foo"",machine=""fuzz""}`), newSeries(testSize, identity, `{app=""foo"",machine=""buzz""}`)}, -+ {newSeries(testSize, identity, `{app=""foo""}`)}, -+ }, -+ []SelectSampleParams{ -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,machine) (count_over_time({app=""foo""}[1m]))`}}, -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app) (count_over_time({app=""foo""}[1m]))`}}, -+ }, -+ promql.Vector{ -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, 
Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""buzz""}}}, -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""fuzz""}}}, -+ }, -+ }, -+ { -+ `sum by (app,machine) (count_over_time({app=""foo""}[1m])) > bool on () group_left () sum by (app) (count_over_time({app=""foo""}[1m]))`, -+ time.Unix(60, 0), -+ logproto.FORWARD, -+ 0, -+ [][]logproto.Series{ -+ {newSeries(testSize, identity, `{app=""foo"",machine=""fuzz""}`), newSeries(testSize, identity, `{app=""foo"",machine=""buzz""}`)}, -+ {newSeries(testSize, identity, `{app=""foo""}`)}, -+ }, -+ []SelectSampleParams{ -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,machine) (count_over_time({app=""foo""}[1m]))`}}, -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app) (count_over_time({app=""foo""}[1m]))`}}, -+ }, -+ promql.Vector{ -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""buzz""}}}, -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""fuzz""}}}, -+ }, -+ }, -+ { -+ `sum by (app,machine) (count_over_time({app=""foo""}[1m])) > bool on (app) group_left (pool) sum by (app,pool) (count_over_time({app=""foo""}[1m]))`, -+ time.Unix(60, 0), -+ logproto.FORWARD, -+ 0, -+ [][]logproto.Series{ -+ {newSeries(testSize, identity, `{app=""foo"",machine=""fuzz""}`), newSeries(testSize, identity, `{app=""foo"",machine=""buzz""}`)}, -+ {newSeries(testSize, identity, `{app=""foo"",pool=""foo""}`)}, -+ }, -+ []SelectSampleParams{ -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,machine) (count_over_time({app=""foo""}[1m]))`}}, -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,pool) (count_over_time({app=""foo""}[1m]))`}}, -+ }, -+ promql.Vector{ -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""buzz""}, labels.Label{Name: ""pool"", Value: ""foo""}}}, -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""fuzz""}, labels.Label{Name: ""pool"", Value: ""foo""}}}, -+ }, -+ }, -+ { -+ `sum by (app,pool) (count_over_time({app=""foo""}[1m])) > bool on (app) group_right (pool) sum by (app,machine) (count_over_time({app=""foo""}[1m]))`, -+ time.Unix(60, 0), -+ logproto.FORWARD, -+ 0, -+ [][]logproto.Series{ -+ {newSeries(testSize, identity, `{app=""foo"",pool=""foo""}`)}, -+ {newSeries(testSize, identity, `{app=""foo"",machine=""fuzz""}`), newSeries(testSize, identity, `{app=""foo"",machine=""buzz""}`)}, -+ }, -+ []SelectSampleParams{ -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,pool) (count_over_time({app=""foo""}[1m]))`}}, -+ {&logproto.SampleQueryRequest{Start: time.Unix(0, 0), End: time.Unix(60, 0), Selector: `sum by (app,machine) (count_over_time({app=""foo""}[1m]))`}}, -+ }, -+ promql.Vector{ -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: 
labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""buzz""}, labels.Label{Name: ""pool"", Value: ""foo""}}}, -+ promql.Sample{Point: promql.Point{T: 60 * 1000, V: 0}, Metric: labels.Labels{labels.Label{Name: ""app"", Value: ""foo""}, labels.Label{Name: ""machine"", Value: ""fuzz""}, labels.Label{Name: ""pool"", Value: ""foo""}}}, -+ }, - }, - } { - test := test -diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go -index f4d87cc4c89f4..8a98c23b681ed 100644 ---- a/pkg/logql/evaluator.go -+++ b/pkg/logql/evaluator.go -@@ -3,6 +3,7 @@ package logql - import ( - ""container/heap"" - ""context"" -+ ""fmt"" - ""math"" - ""sort"" - ""time"" -@@ -547,71 +548,56 @@ func binOpStepEvaluator( - } - - // we have two non literal legs -- lhs, err := ev.StepEvaluator(ctx, ev, expr.SampleExpr, q) -+ lse, err := ev.StepEvaluator(ctx, ev, expr.SampleExpr, q) - if err != nil { - return nil, err - } -- rhs, err := ev.StepEvaluator(ctx, ev, expr.RHS, q) -+ rse, err := ev.StepEvaluator(ctx, ev, expr.RHS, q) - if err != nil { - return nil, err - } - - return newStepEvaluator(func() (bool, int64, promql.Vector) { -- pairs := map[uint64][2]*promql.Sample{} -- var ts int64 -- -- // populate pairs -- for i, eval := range []StepEvaluator{lhs, rhs} { -- next, timestamp, vec := eval.Next() -- -- ts = timestamp -- -- // These should _always_ happen at the same step on each evaluator. -- if !next { -- return next, ts, nil -- } -- -- for _, sample := range vec { -- // TODO(owen-d): this seems wildly inefficient: we're calculating -- // the hash on each sample & step per evaluator. -- // We seem limited to this approach due to using the StepEvaluator ifc. -- -- var hash uint64 -- if expr.Opts == nil || expr.Opts.VectorMatching == nil { -- hash = sample.Metric.Hash() -- } else if expr.Opts.VectorMatching.On { -- hash = sample.Metric.WithLabels(expr.Opts.VectorMatching.Include...).Hash() -- } else { -- hash = sample.Metric.WithoutLabels(expr.Opts.VectorMatching.Include...).Hash() -- } -- pair := pairs[hash] -- if pair[i] != nil { -- err = errors.New(""multiple matches for labels"") -- return false, ts, nil -- } -- pair[i] = &promql.Sample{ -- Metric: sample.Metric, -- Point: sample.Point, -- } -- pairs[hash] = pair -- } -+ var ( -+ ts int64 -+ next bool -+ lhs, rhs promql.Vector -+ ) -+ next, ts, rhs = rse.Next() -+ // These should _always_ happen at the same step on each evaluator. 
-+ if !next { -+ return next, ts, nil -+ } -+ // build matching signature for each sample in right vector -+ rsigs := make([]uint64, len(rhs)) -+ for i, sample := range rhs { -+ rsigs[i] = matchingSignature(sample, expr.Opts) - } - -- results := make(promql.Vector, 0, len(pairs)) -- for _, pair := range pairs { -- // merge -- filter := true -- if expr.Opts != nil && expr.Opts.ReturnBool { -- filter = false -- } -- if merged := mergeBinOp(expr.Op, pair[0], pair[1], filter, IsComparisonOperator(expr.Op)); merged != nil { -- results = append(results, *merged) -- } -+ next, ts, lhs = lse.Next() -+ if !next { -+ return next, ts, nil -+ } -+ // build matching signature for each sample in left vector -+ lsigs := make([]uint64, len(lhs)) -+ for i, sample := range lhs { -+ lsigs[i] = matchingSignature(sample, expr.Opts) - } - -+ var results promql.Vector -+ switch expr.Op { -+ case OpTypeAnd: -+ results = vectorAnd(lhs, rhs, lsigs, rsigs) -+ case OpTypeOr: -+ results = vectorOr(lhs, rhs, lsigs, rsigs) -+ case OpTypeUnless: -+ results = vectorUnless(lhs, rhs, lsigs, rsigs) -+ default: -+ results, err = vectorBinop(expr.Op, expr.Opts, lhs, rhs, lsigs, rsigs) -+ } - return true, ts, results - }, func() (lastError error) { -- for _, ev := range []StepEvaluator{lhs, rhs} { -+ for _, ev := range []StepEvaluator{lse, rse} { - if err := ev.Close(); err != nil { - lastError = err - } -@@ -622,7 +608,7 @@ func binOpStepEvaluator( - if err != nil { - errs = append(errs, err) - } -- for _, ev := range []StepEvaluator{lhs, rhs} { -+ for _, ev := range []StepEvaluator{lse, rse} { - if err := ev.Error(); err != nil { - errs = append(errs, err) - } -@@ -638,37 +624,190 @@ func binOpStepEvaluator( - }) - } - --func mergeBinOp(op string, left, right *promql.Sample, filter, isVectorComparison bool) *promql.Sample { -- var merger func(left, right *promql.Sample) *promql.Sample -+func matchingSignature(sample promql.Sample, opts *BinOpOptions) uint64 { -+ if opts == nil || opts.VectorMatching == nil { -+ return sample.Metric.Hash() -+ } else if opts.VectorMatching.On { -+ return sample.Metric.WithLabels(opts.VectorMatching.MatchingLabels...).Hash() -+ } else { -+ return sample.Metric.WithoutLabels(opts.VectorMatching.MatchingLabels...).Hash() -+ } -+} - -- switch op { -- case OpTypeOr: -- merger = func(left, right *promql.Sample) *promql.Sample { -- // return the left entry found (prefers left hand side) -- if left != nil { -- return left -+func vectorBinop(op string, opts *BinOpOptions, lhs, rhs promql.Vector, lsigs, rsigs []uint64) (promql.Vector, error) { -+ // handle one-to-one or many-to-one matching -+ //for one-to-many, swap -+ if opts != nil && opts.VectorMatching.Card == CardOneToMany { -+ lhs, rhs = rhs, lhs -+ lsigs, rsigs = rsigs, lsigs -+ } -+ rightSigs := make(map[uint64]*promql.Sample) -+ matchedSigs := make(map[uint64]map[uint64]struct{}) -+ results := make(promql.Vector, 0) -+ -+ // Add all rhs samples to a map so we can easily find matches later. 
-+ for i, sample := range rhs { -+ sig := rsigs[i] -+ if rightSigs[sig] != nil { -+ side := ""right"" -+ if opts.VectorMatching.Card == CardOneToMany { -+ side = ""left"" - } -- return right -+ return nil, fmt.Errorf(""found duplicate series on the %s hand-side""+ -+ "";many-to-many matching not allowed: matching labels must be unique on one side"", side) - } -+ rightSigs[sig] = &promql.Sample{ -+ Metric: sample.Metric, -+ Point: sample.Point, -+ } -+ } - -- case OpTypeAnd: -- merger = func(left, right *promql.Sample) *promql.Sample { -- // return left sample if there's a second sample for that label set -- if left != nil && right != nil { -- return left -+ for i, sample := range lhs { -+ ls := &sample -+ sig := lsigs[i] -+ rs, found := rightSigs[sig] // Look for a match in the rhs Vector. -+ if !found { -+ continue -+ } -+ -+ metric := resultMetric(ls.Metric, rs.Metric, opts) -+ insertedSigs, exists := matchedSigs[sig] -+ filter := true -+ if opts != nil { -+ if opts.VectorMatching.Card == CardOneToOne { -+ if exists { -+ return nil, errors.New(""multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)"") -+ } -+ matchedSigs[sig] = nil -+ } else { -+ insertSig := metric.Hash() -+ if !exists { -+ insertedSigs = map[uint64]struct{}{} -+ matchedSigs[sig] = insertedSigs -+ } else if _, duplicate := insertedSigs[insertSig]; duplicate { -+ return nil, errors.New(""multiple matches for labels: grouping labels must ensure unique matches"") -+ } -+ insertedSigs[insertSig] = struct{}{} -+ } -+ // merge -+ if opts.ReturnBool { -+ filter = false -+ } -+ // swap back before apply binary operator -+ if opts.VectorMatching.Card == CardOneToMany { -+ ls, rs = rs, ls - } -- return nil - } - -- case OpTypeUnless: -- merger = func(left, right *promql.Sample) *promql.Sample { -- // return left sample if there's not a second sample for that label set -- if right == nil { -- return left -+ if merged := mergeBinOp(op, ls, rs, filter, IsComparisonOperator(op)); merged != nil { -+ // replace with labels specified by expr -+ merged.Metric = metric -+ results = append(results, *merged) -+ } -+ } -+ return results, nil -+} -+ -+func vectorAnd(lhs, rhs promql.Vector, lsigs, rsigs []uint64) promql.Vector { -+ if len(lhs) == 0 || len(rhs) == 0 { -+ return nil // Short-circuit: AND with nothing is nothing. 
-+ } -+ -+ rightSigs := make(map[uint64]struct{}) -+ results := make(promql.Vector, 0) -+ -+ for _, sig := range rsigs { -+ rightSigs[sig] = struct{}{} -+ } -+ for i, ls := range lhs { -+ if _, ok := rightSigs[lsigs[i]]; ok { -+ results = append(results, ls) -+ } -+ } -+ return results -+} -+ -+func vectorOr(lhs, rhs promql.Vector, lsigs, rsigs []uint64) promql.Vector { -+ if len(lhs) == 0 { -+ return rhs -+ } else if len(rhs) == 0 { -+ return lhs -+ } -+ -+ leftSigs := make(map[uint64]struct{}) -+ results := make(promql.Vector, 0) -+ -+ for i, ls := range lhs { -+ leftSigs[lsigs[i]] = struct{}{} -+ results = append(results, ls) -+ } -+ for i, rs := range rhs { -+ if _, ok := leftSigs[rsigs[i]]; !ok { -+ results = append(results, rs) -+ } -+ } -+ return results -+} -+ -+func vectorUnless(lhs, rhs promql.Vector, lsigs, rsigs []uint64) promql.Vector { -+ if len(lhs) == 0 || len(rhs) == 0 { -+ return lhs -+ } -+ -+ rightSigs := make(map[uint64]struct{}) -+ results := make(promql.Vector, 0) -+ -+ for _, sig := range rsigs { -+ rightSigs[sig] = struct{}{} -+ } -+ -+ for i, ls := range lhs { -+ if _, ok := rightSigs[lsigs[i]]; !ok { -+ results = append(results, ls) -+ } -+ } -+ return results -+} -+ -+// resultMetric returns the metric for the given sample(s) based on the Vector -+// binary operation and the matching options. -+func resultMetric(lhs, rhs labels.Labels, opts *BinOpOptions) labels.Labels { -+ lb := labels.NewBuilder(lhs) -+ -+ if opts != nil { -+ matching := opts.VectorMatching -+ if matching.Card == CardOneToOne { -+ if matching.On { -+ Outer: -+ for _, l := range lhs { -+ for _, n := range matching.MatchingLabels { -+ if l.Name == n { -+ continue Outer -+ } -+ } -+ lb.Del(l.Name) -+ } -+ } else { -+ lb.Del(matching.MatchingLabels...) -+ } -+ } -+ for _, ln := range matching.Include { -+ // Included labels from the `group_x` modifier are taken from the ""one""-side. -+ if v := rhs.Get(ln); v != """" { -+ lb.Set(ln, v) -+ } else { -+ lb.Del(ln) - } -- return nil - } -+ } - -+ return lb.Labels() -+} -+ -+func mergeBinOp(op string, left, right *promql.Sample, filter, isVectorComparison bool) *promql.Sample { -+ var merger func(left, right *promql.Sample) *promql.Sample -+ -+ switch op { - case OpTypeAdd: - merger = func(left, right *promql.Sample) *promql.Sample { - if left == nil || right == nil { -@@ -901,20 +1040,6 @@ func mergeBinOp(op string, left, right *promql.Sample, filter, isVectorCompariso - if res != nil { - return left - } -- -- // otherwise it's been filtered out -- return res -- } -- -- // This only leaves vector comparisons which are not filters. -- // If we could not find a match but we have a left node to compare, create an entry with a 0 value. -- // This can occur when we don't find a matching label set in the vectors. -- if res == nil && left != nil && right == nil { -- res = &promql.Sample{ -- Metric: left.Metric, -- Point: left.Point, -- } -- res.Point.V = 0 - } - return res - } -diff --git a/pkg/logql/evaluator_test.go b/pkg/logql/evaluator_test.go -index 3d1dcaf262af3..b930535301fbc 100644 ---- a/pkg/logql/evaluator_test.go -+++ b/pkg/logql/evaluator_test.go -@@ -203,15 +203,7 @@ func TestEvaluator_mergeBinOpComparisons(t *testing.T) { - require.Equal(t, tc.expected, mergeBinOp(tc.op, tc.lhs, tc.rhs, false, false)) - require.Equal(t, tc.expected, mergeBinOp(tc.op, tc.lhs, tc.rhs, false, true)) - -- // vector-vector comparing when not filtering should propagate the labels for nil right hand side matches, -- // but set the value to zero. 
-- require.Equal( -- t, -- &promql.Sample{ -- Point: promql.Point{V: 0}, -- }, -- mergeBinOp(tc.op, tc.lhs, nil, false, true), -- ) -+ require.Nil(t, mergeBinOp(tc.op, tc.lhs, nil, false, true)) - - // test filtered variants - if tc.expected.V == 0 { -diff --git a/pkg/logql/expr.y b/pkg/logql/expr.y -index 26ce2004374eb..7f1cac712e8b3 100644 ---- a/pkg/logql/expr.y -+++ b/pkg/logql/expr.y -@@ -35,6 +35,7 @@ import ( - LiteralExpr *LiteralExpr - BinOpModifier *BinOpOptions - BoolModifier *BinOpOptions -+ OnOrIgnoringModifier *BinOpOptions - LabelParser *LabelParserExpr - LineFilters *LineFilterExpr - LineFilter *LineFilterExpr -@@ -80,6 +81,7 @@ import ( - %type labelReplaceExpr - %type binOpModifier - %type boolModifier -+%type onOrIgnoringModifier - %type labelParser - %type pipelineExpr - %type pipelineStage -@@ -108,7 +110,7 @@ import ( - OPEN_PARENTHESIS CLOSE_PARENTHESIS BY WITHOUT COUNT_OVER_TIME RATE SUM AVG MAX MIN COUNT STDDEV STDVAR BOTTOMK TOPK - BYTES_OVER_TIME BYTES_RATE BOOL JSON REGEXP LOGFMT PIPE LINE_FMT LABEL_FMT UNWRAP AVG_OVER_TIME SUM_OVER_TIME MIN_OVER_TIME - MAX_OVER_TIME STDVAR_OVER_TIME STDDEV_OVER_TIME QUANTILE_OVER_TIME BYTES_CONV DURATION_CONV DURATION_SECONDS_CONV -- FIRST_OVER_TIME LAST_OVER_TIME ABSENT_OVER_TIME LABEL_REPLACE UNPACK OFFSET PATTERN IP ON IGNORING -+ FIRST_OVER_TIME LAST_OVER_TIME ABSENT_OVER_TIME LABEL_REPLACE UNPACK OFFSET PATTERN IP ON IGNORING GROUP_LEFT GROUP_RIGHT - - // Operators are listed with increasing precedence. - %left OR -@@ -363,34 +365,75 @@ binOpExpr: - | expr LTE binOpModifier expr { $$ = mustNewBinOpExpr(""<="", $3, $1, $4) } - ; - --binOpModifier: -- boolModifier { $$ = $1 } -- | boolModifier ON OPEN_PARENTHESIS labels CLOSE_PARENTHESIS -+boolModifier: -+ { -+ $$ = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}} -+ } -+ | BOOL -+ { -+ $$ = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool:true} -+ } -+ ; -+ -+onOrIgnoringModifier: -+ boolModifier ON OPEN_PARENTHESIS labels CLOSE_PARENTHESIS - { - $$ = $1 -- $$.VectorMatching = &VectorMatching{On: true, Include: $4} -+ $$.VectorMatching.On=true -+ $$.VectorMatching.MatchingLabels=$4 - } - | boolModifier ON OPEN_PARENTHESIS CLOSE_PARENTHESIS - { - $$ = $1 -- $$.VectorMatching = &VectorMatching{On: true, Include: nil} -+ $$.VectorMatching.On=true - } - | boolModifier IGNORING OPEN_PARENTHESIS labels CLOSE_PARENTHESIS - { - $$ = $1 -- $$.VectorMatching = &VectorMatching{On: false, Include: $4} -+ $$.VectorMatching.MatchingLabels=$4 - } - | boolModifier IGNORING OPEN_PARENTHESIS CLOSE_PARENTHESIS - { - $$ = $1 -- $$.VectorMatching = &VectorMatching{On: false, Include: nil} - } - ; - --boolModifier: -- { $$ = &BinOpOptions{} } -- | BOOL { $$ = &BinOpOptions{ ReturnBool: true } } -- ; -+binOpModifier: -+ boolModifier {$$ = $1 } -+ | onOrIgnoringModifier {$$ = $1 } -+ | onOrIgnoringModifier GROUP_LEFT -+ { -+ $$ = $1 -+ $$.VectorMatching.Card = CardManyToOne -+ } -+ | onOrIgnoringModifier GROUP_LEFT OPEN_PARENTHESIS CLOSE_PARENTHESIS -+ { -+ $$ = $1 -+ $$.VectorMatching.Card = CardManyToOne -+ } -+ | onOrIgnoringModifier GROUP_LEFT OPEN_PARENTHESIS labels CLOSE_PARENTHESIS -+ { -+ $$ = $1 -+ $$.VectorMatching.Card = CardManyToOne -+ $$.VectorMatching.Include = $4 -+ } -+ | onOrIgnoringModifier GROUP_RIGHT -+ { -+ $$ = $1 -+ $$.VectorMatching.Card = CardOneToMany -+ } -+ | onOrIgnoringModifier GROUP_RIGHT OPEN_PARENTHESIS CLOSE_PARENTHESIS -+ { -+ $$ = $1 -+ $$.VectorMatching.Card = CardOneToMany -+ } -+ | onOrIgnoringModifier 
GROUP_RIGHT OPEN_PARENTHESIS labels CLOSE_PARENTHESIS -+ { -+ $$ = $1 -+ $$.VectorMatching.Card = CardOneToMany -+ $$.VectorMatching.Include = $4 -+ } -+ ; - - literalExpr: - NUMBER { $$ = mustNewLiteralExpr( $1, false ) } -diff --git a/pkg/logql/expr.y.go b/pkg/logql/expr.y.go -index acc3e6f462b2f..67e81ded7673c 100644 ---- a/pkg/logql/expr.y.go -+++ b/pkg/logql/expr.y.go -@@ -38,6 +38,7 @@ type exprSymType struct { - LiteralExpr *LiteralExpr - BinOpModifier *BinOpOptions - BoolModifier *BinOpOptions -+ OnOrIgnoringModifier *BinOpOptions - LabelParser *LabelParserExpr - LineFilters *LineFilterExpr - LineFilter *LineFilterExpr -@@ -124,21 +125,23 @@ const PATTERN = 57406 - const IP = 57407 - const ON = 57408 - const IGNORING = 57409 --const OR = 57410 --const AND = 57411 --const UNLESS = 57412 --const CMP_EQ = 57413 --const NEQ = 57414 --const LT = 57415 --const LTE = 57416 --const GT = 57417 --const GTE = 57418 --const ADD = 57419 --const SUB = 57420 --const MUL = 57421 --const DIV = 57422 --const MOD = 57423 --const POW = 57424 -+const GROUP_LEFT = 57410 -+const GROUP_RIGHT = 57411 -+const OR = 57412 -+const AND = 57413 -+const UNLESS = 57414 -+const CMP_EQ = 57415 -+const NEQ = 57416 -+const LT = 57417 -+const LTE = 57418 -+const GT = 57419 -+const GTE = 57420 -+const ADD = 57421 -+const SUB = 57422 -+const MUL = 57423 -+const DIV = 57424 -+const MOD = 57425 -+const POW = 57426 - - var exprToknames = [...]string{ - ""$end"", -@@ -208,6 +211,8 @@ var exprToknames = [...]string{ - ""IP"", - ""ON"", - ""IGNORING"", -+ ""GROUP_LEFT"", -+ ""GROUP_RIGHT"", - ""OR"", - ""AND"", - ""UNLESS"", -@@ -239,130 +244,133 @@ var exprExca = [...]int{ - - const exprPrivate = 57344 - --const exprLast = 539 -+const exprLast = 532 - - var exprAct = [...]int{ - -- 243, 192, 76, 4, 173, 58, 161, 5, 166, 201, -- 67, 111, 50, 57, 246, 134, 69, 2, 45, 46, -+ 248, 195, 76, 4, 176, 58, 164, 5, 169, 204, -+ 67, 112, 50, 57, 122, 135, 69, 2, 45, 46, - 47, 48, 49, 50, 72, 42, 43, 44, 51, 52, - 55, 56, 53, 54, 45, 46, 47, 48, 49, 50, - 43, 44, 51, 52, 55, 56, 53, 54, 45, 46, -- 47, 48, 49, 50, 47, 48, 49, 50, 130, 132, -- 133, 65, 309, 99, 175, 132, 133, 103, 63, 64, -- 223, 251, 185, 224, 222, 145, 146, 248, 61, 138, -- 309, 246, 136, 121, 84, 143, 51, 52, 55, 56, -- 53, 54, 45, 46, 47, 48, 49, 50, 75, 144, -- 77, 78, 147, 148, 149, 150, 151, 152, 153, 154, -- 155, 156, 157, 158, 159, 160, 77, 78, 131, 66, -- 285, 118, 170, 181, 176, 179, 180, 177, 178, 65, -- 219, 221, 184, 220, 218, 163, 63, 64, 100, 115, -- 183, 285, 123, 260, 199, 195, 203, 247, 300, 249, -- 193, 247, 204, 196, 65, 248, 312, 188, 260, 194, -- 329, 63, 64, 299, 293, 267, 292, 260, 191, 209, -- 210, 211, 262, 65, 188, 203, 248, 324, 246, 277, -- 63, 64, 248, 252, 194, 162, 248, 66, 203, 317, -- 118, 217, 241, 244, 265, 250, 255, 253, 136, 99, -- 256, 103, 257, 194, 163, 245, 242, 205, 115, 254, -- 260, 249, 66, 188, 191, 261, 65, 264, 266, 65, -- 270, 268, 203, 63, 64, 316, 63, 64, 314, 65, -- 295, 66, 65, 276, 258, 189, 63, 64, 197, 63, -- 64, 202, 306, 118, 286, 278, 194, 280, 282, 194, -- 284, 99, 118, 164, 162, 283, 294, 279, 118, 194, -- 99, 115, 60, 296, 125, 12, 163, 135, 124, 275, -- 115, 214, 163, 137, 66, 12, 115, 66, 274, 208, -- 303, 304, 207, 137, 182, 99, 305, 66, 142, 141, -- 66, 140, 307, 308, 288, 289, 290, 81, 313, 74, -- 327, 323, 298, 127, 259, 15, 215, 212, 206, 319, -- 198, 320, 321, 12, 190, 164, 162, 126, 129, 216, -- 128, 6, 213, 325, 322, 19, 20, 33, 34, 36, -- 37, 35, 38, 39, 40, 41, 21, 22, 238, 311, -- 235, 239, 237, 
236, 234, 281, 23, 24, 25, 26, -- 27, 28, 29, 310, 291, 80, 30, 31, 32, 18, -- 232, 200, 229, 233, 231, 230, 228, 226, 79, 12, -- 227, 225, 272, 273, 328, 16, 17, 6, 326, 315, -- 302, 19, 20, 33, 34, 36, 37, 35, 38, 39, -- 40, 41, 21, 22, 3, 301, 271, 269, 263, 174, -- 318, 68, 23, 24, 25, 26, 27, 28, 29, 240, -- 187, 186, 30, 31, 32, 18, 297, 139, 185, 184, -- 171, 169, 168, 71, 167, 12, 73, 73, 174, 112, -- 113, 16, 17, 6, 165, 102, 118, 19, 20, 33, -- 34, 36, 37, 35, 38, 39, 40, 41, 21, 22, -- 172, 105, 104, 59, 115, 119, 114, 120, 23, 24, -- 25, 26, 27, 28, 29, 118, 101, 83, 30, 31, -- 32, 18, 106, 108, 107, 11, 116, 117, 251, 10, -- 9, 82, 122, 115, 14, 8, 287, 16, 17, 13, -- 7, 70, 62, 109, 1, 110, 0, 0, 0, 0, -- 0, 106, 108, 107, 0, 116, 117, 0, 0, 0, -- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -- 0, 0, 109, 0, 110, 85, 86, 87, 88, 89, -- 90, 91, 92, 93, 94, 95, 96, 97, 98, -+ 47, 48, 49, 50, 47, 48, 49, 50, 131, 133, -+ 134, 65, 320, 100, 178, 133, 134, 104, 63, 64, -+ 148, 149, 228, 124, 188, 229, 227, 146, 147, 139, -+ 251, 251, 137, 61, 256, 144, 51, 52, 55, 56, -+ 53, 54, 45, 46, 47, 48, 49, 50, 253, 145, -+ 294, 320, 294, 150, 151, 152, 153, 154, 155, 156, -+ 157, 158, 159, 160, 161, 162, 163, 323, 119, 85, -+ 132, 66, 340, 173, 335, 184, 179, 182, 183, 180, -+ 181, 328, 166, 226, 191, 253, 116, 253, 317, 77, -+ 78, 186, 265, 101, 265, 202, 198, 311, 191, 310, -+ 254, 196, 206, 207, 199, 65, 286, 252, 75, 65, -+ 77, 78, 63, 64, 194, 302, 63, 64, 327, 65, -+ 260, 275, 214, 215, 216, 254, 63, 64, 325, 257, -+ 65, 194, 252, 65, 165, 197, 65, 63, 64, 197, -+ 63, 64, 253, 63, 64, 246, 249, 301, 255, 197, -+ 258, 137, 100, 261, 104, 262, 265, 304, 250, 247, -+ 197, 309, 259, 197, 285, 66, 197, 253, 263, 66, -+ 269, 271, 274, 276, 119, 279, 277, 65, 82, 66, -+ 206, 265, 251, 119, 63, 64, 308, 284, 166, 206, -+ 66, 295, 116, 66, 191, 119, 66, 166, 200, 273, -+ 287, 116, 289, 291, 126, 293, 100, 60, 272, 166, -+ 292, 303, 288, 116, 219, 100, 192, 224, 305, 187, -+ 225, 223, 86, 87, 88, 89, 90, 91, 92, 93, -+ 94, 95, 96, 97, 98, 99, 206, 66, 125, 314, -+ 315, 297, 298, 299, 100, 316, 119, 283, 167, 165, -+ 206, 318, 319, 265, 265, 270, 206, 324, 267, 266, -+ 167, 165, 213, 136, 116, 119, 15, 212, 12, 208, -+ 330, 12, 331, 332, 12, 205, 138, 211, 222, 138, -+ 210, 185, 6, 116, 336, 143, 19, 20, 33, 34, -+ 36, 37, 35, 38, 39, 40, 41, 21, 22, 142, -+ 141, 107, 109, 108, 81, 117, 118, 23, 24, 25, -+ 26, 27, 28, 29, 74, 338, 334, 30, 31, 32, -+ 18, 203, 110, 307, 111, 264, 220, 217, 209, 12, -+ 201, 193, 130, 221, 218, 333, 322, 6, 16, 17, -+ 128, 19, 20, 33, 34, 36, 37, 35, 38, 39, -+ 40, 41, 21, 22, 127, 243, 321, 129, 244, 242, -+ 80, 300, 23, 24, 25, 26, 27, 28, 29, 290, -+ 281, 282, 30, 31, 32, 18, 140, 240, 79, 237, -+ 241, 239, 238, 236, 12, 234, 339, 231, 235, 233, -+ 232, 230, 6, 16, 17, 119, 19, 20, 33, 34, -+ 36, 37, 35, 38, 39, 40, 41, 21, 22, 3, -+ 337, 326, 313, 116, 312, 278, 68, 23, 24, 25, -+ 26, 27, 28, 29, 268, 245, 190, 30, 31, 32, -+ 18, 107, 109, 108, 189, 117, 118, 256, 280, 188, -+ 187, 177, 113, 174, 172, 171, 329, 71, 16, 17, -+ 73, 306, 110, 170, 111, 73, 177, 114, 168, 103, -+ 175, 106, 105, 59, 120, 115, 121, 102, 84, 83, -+ 11, 10, 9, 123, 14, 8, 296, 13, 7, 70, -+ 62, 1, - } - var exprPact = [...]int{ - -- 298, -1000, -43, -1000, -1000, 218, 298, -1000, -1000, -1000, -- -1000, -1000, 421, 276, 75, -1000, 361, 348, 274, -1000, -+ 309, -1000, -45, -1000, -1000, 213, 309, -1000, -1000, -1000, -+ -1000, -1000, 495, 341, 135, -1000, 421, 403, 331, -1000, - -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -- -1000, -1000, 44, 44, 44, 44, 44, 44, 44, 44, -- 44, 44, 44, 44, 44, 44, 44, 218, -1000, 47, -- 460, -1000, 77, -1000, -1000, -1000, -1000, 244, 240, -43, -- 301, 302, -1000, 46, 260, 410, 268, 266, 265, -1000, -- -1000, 298, 298, 9, -1000, 298, 298, 298, 298, 298, -- 298, 298, 298, 298, 298, 298, 298, 298, 298, -1000, -- -1000, -1000, -1000, 185, -1000, -1000, 419, -1000, 416, -1000, -- 415, -1000, -1000, -1000, -1000, 238, 414, 423, 52, -1000, -- -1000, -1000, 261, -1000, -1000, -1000, -1000, -1000, 422, -1000, -- 413, 412, 405, 404, 211, 295, 205, 250, 214, 291, -- 354, 217, 183, 289, -29, 259, 256, 15, 15, -25, -- -25, -70, -70, -70, -70, -59, -59, -59, -59, -59, -- -59, 185, 238, 238, 238, 288, -1000, 310, -1000, -1000, -- 247, -1000, 287, -1000, 307, 126, 66, 363, 358, 356, -- 336, 334, 403, -1000, -1000, -1000, -1000, -1000, -1000, 91, -- 250, 115, 138, 202, 431, 159, 172, 91, 298, 210, -- 285, 191, -1000, -1000, 148, -1000, 392, 170, 141, 253, -- 185, 116, 419, 391, -1000, 394, 367, 255, -1000, -1000, -- -1000, 246, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -+ -1000, -1000, 79, 79, 79, 79, 79, 79, 79, 79, -+ 79, 79, 79, 79, 79, 79, 79, 213, -1000, 47, -+ 310, -1000, 8, -1000, -1000, -1000, -1000, 264, 230, -45, -+ 388, 366, -1000, 46, 306, 419, 327, 326, 312, -1000, -+ -1000, 309, 309, 11, 2, -1000, 309, 309, 309, 309, -+ 309, 309, 309, 309, 309, 309, 309, 309, 309, 309, -+ -1000, -1000, -1000, -1000, 228, -1000, -1000, 498, -1000, 489, -+ -1000, 488, -1000, -1000, -1000, -1000, 291, 487, 501, 52, -+ -1000, -1000, -1000, 308, -1000, -1000, -1000, -1000, -1000, 500, -+ -1000, 484, 483, 478, 470, 242, 362, 172, 303, 224, -+ 361, 364, 301, 295, 359, -31, 307, 304, 294, 289, -+ 13, 13, -27, -27, -72, -72, -72, -72, -61, -61, -+ -61, -61, -61, -61, 228, 291, 291, 291, 358, -1000, -+ 372, -1000, -1000, 240, -1000, 357, -1000, 371, 263, 68, -+ 433, 431, 425, 423, 401, 469, -1000, -1000, -1000, -1000, -+ -1000, -1000, 114, 303, 169, 148, 166, 440, 155, 146, -+ 114, 309, 194, 356, 285, -1000, -1000, 284, -1000, 468, -+ 281, 234, 225, 147, 219, 228, 113, 498, 459, -1000, -+ 486, 415, 274, -1000, -1000, -1000, 214, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -- 209, -1000, 155, 215, 33, 215, 337, -49, 238, -49, -- 111, 239, 345, 142, 140, -1000, -1000, 206, -1000, 298, -- 411, -1000, -1000, 283, 139, -1000, 124, -1000, -1000, -1000, -- -1000, -1000, -1000, -1000, 389, 374, -1000, 91, 33, 215, -- 33, -1000, -1000, 185, -1000, -49, -1000, 219, -1000, -1000, -- -1000, 18, 344, 330, 132, 91, 204, -1000, 373, -1000, -- -1000, 201, 165, -1000, 33, -1000, 395, 36, 33, 24, -- -49, -49, 315, -1000, -1000, 282, -1000, -1000, 153, 33, -- -1000, -1000, -49, 372, -1000, -1000, 281, 368, 136, -1000, -+ -1000, -1000, -1000, -1000, -1000, 190, -1000, 132, 145, 54, -+ 145, 411, 17, 291, 17, 91, 236, 402, 173, 141, -+ -1000, -1000, 183, -1000, 309, 496, -1000, -1000, 354, 212, -+ -1000, 187, -1000, -1000, 125, -1000, 123, -1000, -1000, -1000, -+ -1000, -1000, -1000, 458, 456, -1000, 114, 54, 145, 54, -+ -1000, -1000, 228, -1000, 17, -1000, 115, -1000, -1000, -1000, -+ 18, 397, 377, 93, 114, 154, -1000, 455, -1000, -1000, -+ -1000, -1000, 144, 107, -1000, 54, -1000, 491, 57, 54, -+ 37, 17, 17, 376, -1000, -1000, 347, -1000, -1000, 100, -+ 54, -1000, -1000, 17, 454, -1000, -1000, 346, 430, 98, -+ -1000, - } - var exprPgo = 
[...]int{ - -- 0, 494, 16, 492, 2, 9, 394, 3, 15, 11, -- 491, 490, 489, 486, 7, 485, 484, 482, 480, 479, -- 475, 481, 467, 466, 13, 5, 457, 456, 455, 6, -- 453, 78, 452, 451, 4, 450, 435, 8, 434, 1, -- 430, 429, 0, -+ 0, 531, 16, 530, 2, 9, 459, 3, 15, 11, -+ 529, 528, 527, 526, 7, 525, 524, 523, 522, 521, -+ 520, 228, 519, 518, 517, 13, 5, 516, 515, 514, -+ 6, 513, 83, 512, 511, 4, 510, 509, 8, 508, -+ 1, 507, 492, 0, - } - var exprR1 = [...]int{ - - 0, 1, 2, 2, 7, 7, 7, 7, 7, 7, - 6, 6, 6, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, -- 8, 8, 8, 8, 8, 8, 8, 8, 8, 39, -- 39, 39, 13, 13, 13, 11, 11, 11, 11, 15, -+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 40, -+ 40, 40, 13, 13, 13, 11, 11, 11, 11, 15, - 15, 15, 15, 15, 15, 20, 3, 3, 3, 3, -- 14, 14, 14, 10, 10, 9, 9, 9, 9, 24, -- 24, 25, 25, 25, 25, 25, 25, 17, 31, 31, -- 30, 30, 23, 23, 23, 23, 23, 36, 32, 34, -- 34, 35, 35, 35, 33, 29, 29, 29, 29, 29, -- 29, 29, 29, 29, 37, 38, 38, 41, 41, 40, -- 40, 28, 28, 28, 28, 28, 28, 28, 26, 26, -- 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, -- 27, 27, 18, 18, 18, 18, 18, 18, 18, 18, -- 18, 18, 18, 18, 18, 18, 18, 21, 21, 21, -- 21, 21, 22, 22, 19, 19, 19, 16, 16, 16, -- 16, 16, 16, 16, 16, 16, 12, 12, 12, 12, -- 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, -- 42, 5, 5, 4, 4, 4, 4, -+ 14, 14, 14, 10, 10, 9, 9, 9, 9, 25, -+ 25, 26, 26, 26, 26, 26, 26, 17, 32, 32, -+ 31, 31, 24, 24, 24, 24, 24, 37, 33, 35, -+ 35, 36, 36, 36, 34, 30, 30, 30, 30, 30, -+ 30, 30, 30, 30, 38, 39, 39, 42, 42, 41, -+ 41, 29, 29, 29, 29, 29, 29, 29, 27, 27, -+ 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, -+ 28, 28, 18, 18, 18, 18, 18, 18, 18, 18, -+ 18, 18, 18, 18, 18, 18, 18, 22, 22, 23, -+ 23, 23, 23, 21, 21, 21, 21, 21, 21, 21, -+ 21, 19, 19, 19, 16, 16, 16, 16, 16, 16, -+ 16, 16, 16, 12, 12, 12, 12, 12, 12, 12, -+ 12, 12, 12, 12, 12, 12, 12, 43, 5, 5, -+ 4, 4, 4, 4, - } - var exprR2 = [...]int{ - -@@ -380,83 +388,88 @@ var exprR2 = [...]int{ - 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, -- 4, 4, 4, 4, 4, 4, 4, 1, 5, 4, -- 5, 4, 0, 1, 1, 2, 2, 1, 1, 1, -- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -+ 4, 4, 4, 4, 4, 4, 4, 0, 1, 5, -+ 4, 5, 4, 1, 1, 2, 4, 5, 2, 4, -+ 5, 1, 2, 2, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -- 2, 1, 3, 4, 4, 3, 3, -+ 1, 1, 1, 1, 1, 1, 1, 2, 1, 3, -+ 4, 4, 3, 3, - } - var exprChk = [...]int{ - - -1000, -1, -2, -6, -7, -14, 23, -11, -15, -18, -- -19, -20, 15, -12, -16, 7, 77, 78, 61, 27, -+ -19, -20, 15, -12, -16, 7, 79, 80, 61, 27, - 28, 38, 39, 48, 49, 50, 51, 52, 53, 54, - 58, 59, 60, 29, 30, 33, 31, 32, 34, 35, -- 36, 37, 68, 69, 70, 77, 78, 79, 80, 81, -- 82, 71, 72, 75, 76, 73, 74, -24, -25, -30, -- 44, -31, -3, 21, 22, 14, 72, -7, -6, -2, -+ 36, 37, 70, 71, 72, 79, 80, 81, 82, 83, -+ 84, 73, 74, 77, 78, 75, 76, -25, -26, -31, -+ 44, -32, -3, 21, 22, 14, 74, -7, -6, -2, - -10, 2, -9, 5, 23, 23, -4, 25, 26, 7, -- 7, 23, -21, -22, 40, -21, -21, -21, -21, -21, -- -21, -21, -21, -21, -21, -21, -21, -21, -21, -25, -- -31, -23, -36, -29, -32, -33, 41, 43, 42, 62, -- 64, -9, -41, -40, -27, 23, 45, 46, 5, -28, -- -26, 6, -17, 65, 24, 24, 16, 2, 19, 16, -- 12, 72, 13, 14, -8, 7, -14, 23, -7, 7, -- 23, 23, 23, -7, -2, 66, 67, -2, -2, -2, -+ 7, 23, -21, -22, -23, 40, -21, -21, -21, -21, -+ -21, -21, -21, -21, -21, -21, -21, -21, -21, -21, -+ -26, -32, -24, -37, -30, -33, -34, 41, 43, 42, -+ 62, 64, -9, -42, -41, -28, 23, 45, 46, 5, -+ -29, -27, 6, -17, 65, 24, 24, 16, 2, 19, -+ 16, 12, 74, 13, 14, -8, 7, -14, 23, -7, -+ 7, 23, 23, 23, -7, -2, 66, 67, 68, 69, - -2, -2, -2, -2, 
-2, -2, -2, -2, -2, -2, -- -2, -29, 69, 19, 68, -38, -37, 5, 6, 6, -- -29, 6, -35, -34, 5, 12, 72, 75, 76, 73, -- 74, 71, 23, -9, 6, 6, 6, 6, 2, 24, -- 19, 9, -39, -24, 44, -14, -8, 24, 19, -7, -- 7, -5, 24, 5, -5, 24, 19, 23, 23, -29, -- -29, -29, 19, 12, 24, 19, 12, 65, 8, 4, -- 7, 65, 8, 4, 7, 8, 4, 7, 8, 4, -- 7, 8, 4, 7, 8, 4, 7, 8, 4, 7, -- 6, -4, -8, -42, -39, -24, 63, 9, 44, 9, -- -39, 47, 24, -39, -24, 24, -4, -7, 24, 19, -- 19, 24, 24, 6, -5, 24, -5, 24, -37, 6, -- -34, 2, 5, 6, 23, 23, 24, 24, -39, -24, -- -39, 8, -42, -29, -42, 9, 5, -13, 55, 56, -- 57, 9, 24, 24, -39, 24, -7, 5, 19, 24, -- 24, 6, 6, -4, -39, -42, 23, -42, -39, 44, -- 9, 9, 24, -4, 24, 6, 24, 24, 5, -39, -- -42, -42, 9, 19, 24, -42, 6, 19, 6, 24, -+ -2, -2, -2, -2, -30, 71, 19, 70, -39, -38, -+ 5, 6, 6, -30, 6, -36, -35, 5, 12, 74, -+ 77, 78, 75, 76, 73, 23, -9, 6, 6, 6, -+ 6, 2, 24, 19, 9, -40, -25, 44, -14, -8, -+ 24, 19, -7, 7, -5, 24, 5, -5, 24, 19, -+ 23, 23, 23, 23, -30, -30, -30, 19, 12, 24, -+ 19, 12, 65, 8, 4, 7, 65, 8, 4, 7, -+ 8, 4, 7, 8, 4, 7, 8, 4, 7, 8, -+ 4, 7, 8, 4, 7, 6, -4, -8, -43, -40, -+ -25, 63, 9, 44, 9, -40, 47, 24, -40, -25, -+ 24, -4, -7, 24, 19, 19, 24, 24, 6, -5, -+ 24, -5, 24, 24, -5, 24, -5, -38, 6, -35, -+ 2, 5, 6, 23, 23, 24, 24, -40, -25, -40, -+ 8, -43, -30, -43, 9, 5, -13, 55, 56, 57, -+ 9, 24, 24, -40, 24, -7, 5, 19, 24, 24, -+ 24, 24, 6, 6, -4, -40, -43, 23, -43, -40, -+ 44, 9, 9, 24, -4, 24, 6, 24, 24, 5, -+ -40, -43, -43, 9, 19, 24, -43, 6, 19, 6, -+ 24, - } - var exprDef = [...]int{ - - 0, -2, 1, 2, 3, 10, 0, 4, 5, 6, -- 7, 8, 0, 0, 0, 154, 0, 0, 0, 166, -- 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, -- 177, 178, 179, 157, 158, 159, 160, 161, 162, 163, -- 164, 165, 152, 152, 152, 152, 152, 152, 152, 152, -- 152, 152, 152, 152, 152, 152, 152, 11, 69, 71, -+ 7, 8, 0, 0, 0, 161, 0, 0, 0, 173, -+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, -+ 184, 185, 186, 164, 165, 166, 167, 168, 169, 170, -+ 171, 172, 147, 147, 147, 147, 147, 147, 147, 147, -+ 147, 147, 147, 147, 147, 147, 147, 11, 69, 71, - 0, 80, 0, 56, 57, 58, 59, 3, 2, 0, -- 0, 0, 63, 0, 0, 0, 0, 0, 0, 155, -- 156, 0, 0, 147, 153, 0, 0, 0, 0, 0, -- 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, -- 81, 72, 73, 74, 75, 76, 82, 83, 0, 85, -- 0, 95, 96, 97, 98, 0, 0, 0, 0, 109, -- 110, 78, 0, 77, 9, 12, 60, 61, 0, 62, -- 0, 0, 0, 0, 0, 0, 0, 0, 3, 154, -- 0, 0, 0, 3, 132, 0, 0, 133, 134, 135, -- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, -- 146, 100, 0, 0, 0, 87, 105, 0, 84, 86, -- 0, 88, 94, 91, 0, 0, 0, 0, 0, 0, -- 0, 0, 0, 64, 65, 66, 67, 68, 38, 45, -- 0, 13, 0, 0, 0, 0, 0, 49, 0, 3, -- 154, 0, 185, 181, 0, 186, 0, 0, 0, 101, -- 102, 103, 0, 0, 99, 0, 0, 0, 116, 123, -- 130, 0, 115, 122, 129, 111, 118, 125, 112, 119, -- 126, 113, 120, 127, 114, 121, 128, 117, 124, 131, -- 0, 47, 0, 14, 17, 33, 0, 21, 0, 25, -- 0, 0, 0, 0, 0, 37, 51, 3, 50, 0, -- 0, 183, 184, 0, 0, 149, 0, 151, 106, 104, -- 92, 93, 89, 90, 0, 0, 79, 46, 18, 34, -- 35, 180, 22, 41, 26, 29, 39, 0, 42, 43, -- 44, 15, 0, 0, 0, 52, 3, 182, 0, 148, -- 150, 0, 0, 48, 36, 30, 0, 16, 19, 0, -- 23, 27, 0, 53, 54, 0, 107, 108, 0, 20, -- 24, 28, 31, 0, 40, 32, 0, 0, 0, 55, -+ 0, 0, 63, 0, 0, 0, 0, 0, 0, 162, -+ 163, 0, 0, 153, 154, 148, 0, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ 70, 81, 72, 73, 74, 75, 76, 82, 83, 0, -+ 85, 0, 95, 96, 97, 98, 0, 0, 0, 0, -+ 109, 110, 78, 0, 77, 9, 12, 60, 61, 0, -+ 62, 0, 0, 0, 0, 0, 0, 0, 0, 3, -+ 161, 0, 0, 0, 3, 132, 0, 0, 155, 158, -+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, -+ 143, 144, 145, 146, 100, 0, 0, 0, 
87, 105, -+ 0, 84, 86, 0, 88, 94, 91, 0, 0, 0, -+ 0, 0, 0, 0, 0, 0, 64, 65, 66, 67, -+ 68, 38, 45, 0, 13, 0, 0, 0, 0, 0, -+ 49, 0, 3, 161, 0, 192, 188, 0, 193, 0, -+ 0, 0, 0, 0, 101, 102, 103, 0, 0, 99, -+ 0, 0, 0, 116, 123, 130, 0, 115, 122, 129, -+ 111, 118, 125, 112, 119, 126, 113, 120, 127, 114, -+ 121, 128, 117, 124, 131, 0, 47, 0, 14, 17, -+ 33, 0, 21, 0, 25, 0, 0, 0, 0, 0, -+ 37, 51, 3, 50, 0, 0, 190, 191, 0, 0, -+ 150, 0, 152, 156, 0, 159, 0, 106, 104, 92, -+ 93, 89, 90, 0, 0, 79, 46, 18, 34, 35, -+ 187, 22, 41, 26, 29, 39, 0, 42, 43, 44, -+ 15, 0, 0, 0, 52, 3, 189, 0, 149, 151, -+ 157, 160, 0, 0, 48, 36, 30, 0, 16, 19, -+ 0, 23, 27, 0, 53, 54, 0, 107, 108, 0, -+ 20, 24, 28, 31, 0, 40, 32, 0, 0, 0, -+ 55, - } - var exprTok1 = [...]int{ - -@@ -472,7 +485,7 @@ var exprTok2 = [...]int{ - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, -- 82, -+ 82, 83, 84, - } - var exprTok3 = [...]int{ - 0, -@@ -1534,205 +1547,248 @@ exprdefault: - exprVAL.BinOpExpr = mustNewBinOpExpr(""<="", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 147: -- exprDollar = exprS[exprpt-1 : exprpt+1] -+ exprDollar = exprS[exprpt-0 : exprpt+1] - { -- exprVAL.BinOpModifier = exprDollar[1].BoolModifier -+ exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}} - } - case 148: -- exprDollar = exprS[exprpt-5 : exprpt+1] -+ exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.BinOpModifier = exprDollar[1].BoolModifier -- exprVAL.BinOpModifier.VectorMatching = &VectorMatching{On: true, Include: exprDollar[4].Labels} -+ exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: true} - } - case 149: -- exprDollar = exprS[exprpt-4 : exprpt+1] -+ exprDollar = exprS[exprpt-5 : exprpt+1] - { -- exprVAL.BinOpModifier = exprDollar[1].BoolModifier -- exprVAL.BinOpModifier.VectorMatching = &VectorMatching{On: true, Include: nil} -+ exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier -+ exprVAL.OnOrIgnoringModifier.VectorMatching.On = true -+ exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels - } - case 150: -- exprDollar = exprS[exprpt-5 : exprpt+1] -+ exprDollar = exprS[exprpt-4 : exprpt+1] - { -- exprVAL.BinOpModifier = exprDollar[1].BoolModifier -- exprVAL.BinOpModifier.VectorMatching = &VectorMatching{On: false, Include: exprDollar[4].Labels} -+ exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier -+ exprVAL.OnOrIgnoringModifier.VectorMatching.On = true - } - case 151: -- exprDollar = exprS[exprpt-4 : exprpt+1] -+ exprDollar = exprS[exprpt-5 : exprpt+1] - { -- exprVAL.BinOpModifier = exprDollar[1].BoolModifier -- exprVAL.BinOpModifier.VectorMatching = &VectorMatching{On: false, Include: nil} -+ exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier -+ exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels - } - case 152: -- exprDollar = exprS[exprpt-0 : exprpt+1] -+ exprDollar = exprS[exprpt-4 : exprpt+1] - { -- exprVAL.BoolModifier = &BinOpOptions{} -+ exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier - } - case 153: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.BoolModifier = &BinOpOptions{ReturnBool: true} -+ exprVAL.BinOpModifier = exprDollar[1].BoolModifier - } - case 154: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[1].str, false) -+ exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier 
- } - case 155: - exprDollar = exprS[exprpt-2 : exprpt+1] - { -- exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, false) -+ exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier -+ exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne - } - case 156: -+ exprDollar = exprS[exprpt-4 : exprpt+1] -+ { -+ exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier -+ exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne -+ } -+ case 157: -+ exprDollar = exprS[exprpt-5 : exprpt+1] -+ { -+ exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier -+ exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne -+ exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels -+ } -+ case 158: -+ exprDollar = exprS[exprpt-2 : exprpt+1] -+ { -+ exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier -+ exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany -+ } -+ case 159: -+ exprDollar = exprS[exprpt-4 : exprpt+1] -+ { -+ exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier -+ exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany -+ } -+ case 160: -+ exprDollar = exprS[exprpt-5 : exprpt+1] -+ { -+ exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier -+ exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany -+ exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels -+ } -+ case 161: -+ exprDollar = exprS[exprpt-1 : exprpt+1] -+ { -+ exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[1].str, false) -+ } -+ case 162: -+ exprDollar = exprS[exprpt-2 : exprpt+1] -+ { -+ exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, false) -+ } -+ case 163: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, true) - } -- case 157: -+ case 164: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeSum - } -- case 158: -+ case 165: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeAvg - } -- case 159: -+ case 166: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeCount - } -- case 160: -+ case 167: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeMax - } -- case 161: -+ case 168: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeMin - } -- case 162: -+ case 169: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeStddev - } -- case 163: -+ case 170: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeStdvar - } -- case 164: -+ case 171: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeBottomK - } -- case 165: -+ case 172: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeTopK - } -- case 166: -+ case 173: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeCount - } -- case 167: -+ case 174: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeRate - } -- case 168: -+ case 175: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeBytes - } -- case 169: -+ case 176: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeBytesRate - } -- case 170: -+ case 177: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeAvg - } -- case 171: -+ case 178: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeSum - } -- case 172: -+ case 179: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeMin - } -- case 173: -+ case 180: - exprDollar = 
exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeMax - } -- case 174: -+ case 181: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeStdvar - } -- case 175: -+ case 182: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeStddev - } -- case 176: -+ case 183: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeQuantile - } -- case 177: -+ case 184: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeFirst - } -- case 178: -+ case 185: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeLast - } -- case 179: -+ case 186: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeAbsent - } -- case 180: -+ case 187: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.OffsetExpr = newOffsetExpr(exprDollar[2].duration) - } -- case 181: -+ case 188: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Labels = []string{exprDollar[1].str} - } -- case 182: -+ case 189: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Labels = append(exprDollar[1].Labels, exprDollar[3].str) - } -- case 183: -+ case 190: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: false, Groups: exprDollar[3].Labels} - } -- case 184: -+ case 191: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: true, Groups: exprDollar[3].Labels} - } -- case 185: -+ case 192: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: false, Groups: nil} - } -- case 186: -+ case 193: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: true, Groups: nil} -diff --git a/pkg/logql/lex.go b/pkg/logql/lex.go -index a23dd1cd6f0b7..10fe28d21bb42 100644 ---- a/pkg/logql/lex.go -+++ b/pkg/logql/lex.go -@@ -38,6 +38,8 @@ var tokens = map[string]int{ - OpOffset: OFFSET, - OpOn: ON, - OpIgnoring: IGNORING, -+ OpGroupLeft: GROUP_LEFT, -+ OpGroupRight: GROUP_RIGHT, - - // binops - OpTypeOr: OR, -diff --git a/pkg/logql/parser_test.go b/pkg/logql/parser_test.go -index ff323f2c9fc24..a14268534ab51 100644 ---- a/pkg/logql/parser_test.go -+++ b/pkg/logql/parser_test.go -@@ -938,10 +938,14 @@ func TestParse(t *testing.T) { - `, - exp: mustNewBinOpExpr( - OpTypeDiv, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - mustNewBinOpExpr( - OpTypeDiv, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ - Left: &MatchersExpr{ -@@ -1001,10 +1005,14 @@ func TestParse(t *testing.T) { - `, - exp: mustNewBinOpExpr( - OpTypeDiv, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - mustNewBinOpExpr( - OpTypePow, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ - Left: &MatchersExpr{ -@@ -1065,7 +1073,9 @@ func TestParse(t *testing.T) { - `, - exp: mustNewBinOpExpr( - OpTypeAdd, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ - Left: &MatchersExpr{ -@@ -1084,7 +1094,9 @@ func TestParse(t *testing.T) { - ), - mustNewBinOpExpr( - OpTypeDiv, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, 
-+ }, - mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ - Left: &MatchersExpr{ -@@ -1128,7 +1140,9 @@ func TestParse(t *testing.T) { - )`, - exp: mustNewVectorAggregationExpr( - mustNewBinOpExpr(OpTypeDiv, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - newRangeAggregationExpr( - &LogRange{ - Left: newPipelineExpr( -@@ -1156,9 +1170,13 @@ func TestParse(t *testing.T) { - / - count_over_time({namespace=""tns""}[5m]) - ) * 100`, -- exp: mustNewBinOpExpr(OpTypeMul, &BinOpOptions{}, mustNewVectorAggregationExpr( -+ exp: mustNewBinOpExpr(OpTypeMul, &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, mustNewVectorAggregationExpr( - mustNewBinOpExpr(OpTypeDiv, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - newRangeAggregationExpr( - &LogRange{ - Left: newPipelineExpr( -@@ -1187,7 +1205,9 @@ func TestParse(t *testing.T) { - in: `sum(count_over_time({foo=""bar""}[5m])) by (foo) + 1 / 2`, - exp: mustNewBinOpExpr( - OpTypeAdd, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - mustNewVectorAggregationExpr( - newRangeAggregationExpr( - &LogRange{ -@@ -1213,9 +1233,13 @@ func TestParse(t *testing.T) { - in: `1 + -2 / 1`, - exp: mustNewBinOpExpr( - OpTypeAdd, -- &BinOpOptions{}, -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, - &LiteralExpr{value: 1}, -- mustNewBinOpExpr(OpTypeDiv, &BinOpOptions{}, &LiteralExpr{value: -2}, &LiteralExpr{value: 1}), -+ mustNewBinOpExpr(OpTypeDiv, &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, &LiteralExpr{value: -2}, &LiteralExpr{value: 1}), - ), - }, - { -@@ -1223,8 +1247,12 @@ func TestParse(t *testing.T) { - in: `1 + 1 - -1`, - exp: mustNewBinOpExpr( - OpTypeSub, -- &BinOpOptions{}, -- mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{}, &LiteralExpr{value: 1}, &LiteralExpr{value: 1}), -+ &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, -+ mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, -+ }, &LiteralExpr{value: 1}, &LiteralExpr{value: 1}), - &LiteralExpr{value: -1}, - ), - }, -@@ -2109,7 +2137,8 @@ func TestParse(t *testing.T) { - ) by (namespace,instance) - ) by (foo,bar) - `, -- exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false}, -+ exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: false}, - mustNewVectorAggregationExpr( - newRangeAggregationExpr( - newLogRange(&PipelineExpr{ -@@ -2190,7 +2219,7 @@ func TestParse(t *testing.T) { - ) by (namespace,instance) - ) by (foo) - `, -- exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{On: false, Include: []string{""bar""}}}, -+ exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardOneToOne, On: false, MatchingLabels: []string{""bar""}}}, - mustNewVectorAggregationExpr( - newRangeAggregationExpr( - newLogRange(&PipelineExpr{ -@@ -2271,7 +2300,88 @@ func TestParse(t *testing.T) { - ) by (namespace,instance) - ) by (foo) - `, -- exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{On: true, Include: []string{""foo""}}}, -+ exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardOneToOne, On: true, MatchingLabels: 
[]string{""foo""}}}, -+ mustNewVectorAggregationExpr( -+ newRangeAggregationExpr( -+ newLogRange(&PipelineExpr{ -+ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), -+ MultiStages: MultiStageExpr{ -+ newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLabelParserExpr(OpParserTypeJSON, """"), -+ &LabelFilterExpr{ -+ LabelFilterer: log.NewOrLabelFilter( -+ log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, ""latency"", 250*time.Millisecond), -+ log.NewAndLabelFilter( -+ log.NewNumericLabelFilter(log.LabelFilterLesserThan, ""status_code"", 500.0), -+ log.NewNumericLabelFilter(log.LabelFilterGreaterThan, ""status_code"", 200.0), -+ ), -+ ), -+ }, -+ newLineFmtExpr(""blip{{ .foo }}blop {{.status_code}}""), -+ newLabelFmtExpr([]log.LabelFmt{ -+ log.NewRenameLabelFmt(""foo"", ""bar""), -+ log.NewTemplateLabelFmt(""status_code"", ""buzz{{.bar}}""), -+ }), -+ }, -+ }, -+ 5*time.Minute, -+ newUnwrapExpr(""foo"", """"), -+ nil), -+ OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{""namespace"", ""instance""}}, NewStringLabelFilter(""0.99998""), -+ ), -+ OpTypeSum, -+ &Grouping{Groups: []string{""foo"", ""bar""}}, -+ nil, -+ ), -+ mustNewVectorAggregationExpr( -+ newRangeAggregationExpr( -+ newLogRange(&PipelineExpr{ -+ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), -+ MultiStages: MultiStageExpr{ -+ newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLabelParserExpr(OpParserTypeJSON, """"), -+ &LabelFilterExpr{ -+ LabelFilterer: log.NewOrLabelFilter( -+ log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, ""latency"", 250*time.Millisecond), -+ log.NewAndLabelFilter( -+ log.NewNumericLabelFilter(log.LabelFilterLesserThan, ""status_code"", 500.0), -+ log.NewNumericLabelFilter(log.LabelFilterGreaterThan, ""status_code"", 200.0), -+ ), -+ ), -+ }, -+ newLineFmtExpr(""blip{{ .foo }}blop {{.status_code}}""), -+ newLabelFmtExpr([]log.LabelFmt{ -+ log.NewRenameLabelFmt(""foo"", ""bar""), -+ log.NewTemplateLabelFmt(""status_code"", ""buzz{{.bar}}""), -+ }), -+ }, -+ }, -+ 5*time.Minute, -+ newUnwrapExpr(""foo"", """"), -+ nil), -+ OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{""namespace"", ""instance""}}, nil, -+ ), -+ OpTypeAvg, -+ &Grouping{Groups: []string{""foo""}}, -+ nil, -+ ), -+ ), -+ }, -+ { -+ in: ` -+ sum by (foo,bar) ( -+ quantile_over_time(0.99998,{app=""foo""} |= ""bar"" | json | latency >= 250ms or ( status_code < 500 and status_code > 200) -+ | line_format ""blip{{ .foo }}blop {{.status_code}}"" | label_format foo=bar,status_code=""buzz{{.bar}}"" | unwrap foo [5m] -+ ) by (namespace,instance) -+ ) -+ + ignoring (bar) group_left (foo) -+ avg( -+ avg_over_time({app=""foo""} |= ""bar"" | json | latency >= 250ms or ( status_code < 500 and status_code > 200) -+ | line_format ""blip{{ .foo }}blop {{.status_code}}"" | label_format foo=bar,status_code=""buzz{{.bar}}"" | unwrap foo [5m] -+ ) by (namespace,instance) -+ ) by (foo) -+ `, -+ exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardManyToOne, Include: []string{""foo""}, On: false, MatchingLabels: []string{""bar""}}}, - mustNewVectorAggregationExpr( - newRangeAggregationExpr( - newLogRange(&PipelineExpr{ -@@ -2338,6 +2448,68 @@ func TestParse(t *testing.T) { - ), - ), - }, -+ { -+ in: ` -+ sum by (app,machine) (count_over_time({app=""foo""}[1m])) > bool on () group_right (app) sum by (app) (count_over_time({app=""foo""}[1m])) -+ 
`, -+ exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: []string{""app""}, On: true, MatchingLabels: nil}}, -+ mustNewVectorAggregationExpr( -+ newRangeAggregationExpr( -+ &LogRange{ -+ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), -+ Interval: 1 * time.Minute, -+ }, -+ OpRangeTypeCount, nil, nil, -+ ), -+ OpTypeSum, -+ &Grouping{Groups: []string{""app"", ""machine""}}, -+ nil, -+ ), -+ mustNewVectorAggregationExpr( -+ newRangeAggregationExpr( -+ &LogRange{ -+ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), -+ Interval: 1 * time.Minute, -+ }, -+ OpRangeTypeCount, nil, nil, -+ ), -+ OpTypeSum, -+ &Grouping{Groups: []string{""app""}}, -+ nil, -+ ), -+ ), -+ }, -+ { -+ in: ` -+ sum by (app,machine) (count_over_time({app=""foo""}[1m])) > bool on () group_right sum by (app) (count_over_time({app=""foo""}[1m])) -+ `, -+ exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: nil, On: true, MatchingLabels: nil}}, -+ mustNewVectorAggregationExpr( -+ newRangeAggregationExpr( -+ &LogRange{ -+ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), -+ Interval: 1 * time.Minute, -+ }, -+ OpRangeTypeCount, nil, nil, -+ ), -+ OpTypeSum, -+ &Grouping{Groups: []string{""app"", ""machine""}}, -+ nil, -+ ), -+ mustNewVectorAggregationExpr( -+ newRangeAggregationExpr( -+ &LogRange{ -+ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), -+ Interval: 1 * time.Minute, -+ }, -+ OpRangeTypeCount, nil, nil, -+ ), -+ OpTypeSum, -+ &Grouping{Groups: []string{""app""}}, -+ nil, -+ ), -+ ), -+ }, - { - in: ` - label_replace( -@@ -2358,7 +2530,7 @@ func TestParse(t *testing.T) { - ""(.*)"" - )`, - exp: mustNewLabelReplaceExpr( -- mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false}, -+ mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: false}, - mustNewVectorAggregationExpr( - newRangeAggregationExpr( - newLogRange(&PipelineExpr{ -@@ -2497,7 +2669,7 @@ func TestParse(t *testing.T) { - Op: OpTypeGT, - Opts: &BinOpOptions{ - ReturnBool: false, -- VectorMatching: nil, -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, - }, - SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ -@@ -2521,7 +2693,7 @@ func TestParse(t *testing.T) { - Op: OpTypeGT, - Opts: &BinOpOptions{ - ReturnBool: false, -- VectorMatching: nil, -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, - }, - SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ -@@ -2544,7 +2716,7 @@ func TestParse(t *testing.T) { - Op: OpTypeOr, - Opts: &BinOpOptions{ - ReturnBool: false, -- VectorMatching: nil, -+ VectorMatching: &VectorMatching{}, - }, - SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ -@@ -2557,7 +2729,7 @@ func TestParse(t *testing.T) { - Op: OpTypeGT, - Opts: &BinOpOptions{ - ReturnBool: false, -- VectorMatching: nil, -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, - }, - SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ -diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go -index 61d4c54ee8418..953bd42f8cc38 100644 ---- a/pkg/logql/shardmapper_test.go -+++ b/pkg/logql/shardmapper_test.go -@@ -686,7 +686,7 @@ func TestMapping(t *testing.T) { - Op: OpTypeAdd, - Opts: &BinOpOptions{ - ReturnBool: false, -- 
VectorMatching: nil, -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, - }, - SampleExpr: &LiteralExpr{value: 1}, - RHS: &VectorAggregationExpr{ -@@ -864,7 +864,7 @@ func TestMapping(t *testing.T) { - Op: OpTypeDiv, - Opts: &BinOpOptions{ - ReturnBool: false, -- VectorMatching: nil, -+ VectorMatching: &VectorMatching{Card: CardOneToOne}, - }, - SampleExpr: &VectorAggregationExpr{ - Operation: OpTypeMax, -@@ -987,8 +987,8 @@ func TestMapping(t *testing.T) { - Opts: &BinOpOptions{ - ReturnBool: false, - VectorMatching: &VectorMatching{ -- On: false, -- Include: []string{""cluster""}, -+ On: false, -+ MatchingLabels: []string{""cluster""}, - }, - }, - SampleExpr: &VectorAggregationExpr{",unknown,"add group_{left,right} to LogQL (#4510) - -* Add group_left/group_right support - -* Update docs/sources/logql/_index.md - -* Minor change for a test case - -* removes CardManyToMany - -* removes now-unused IsSetOperator helper - -Co-authored-by: Owen Diehl " -b9c9394dc1f8f7fa885306948393750b08bb10d2,2023-09-21 21:20:06,Karsten Jeschkies,Define sketches for quantiles. (#10659),False,"diff --git a/go.mod b/go.mod -index 3d7d86ce577b9..60e6c113e8d56 100644 ---- a/go.mod -+++ b/go.mod -@@ -113,6 +113,7 @@ require ( - - require ( - github.com/Azure/go-autorest/autorest v0.11.29 -+ github.com/DataDog/sketches-go v1.4.2 - github.com/DmitriyVTitov/size v1.5.0 - github.com/IBM/go-sdk-core/v5 v5.13.1 - github.com/IBM/ibm-cos-sdk-go v1.10.0 -@@ -120,6 +121,7 @@ require ( - github.com/fsnotify/fsnotify v1.6.0 - github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 - github.com/heroku/x v0.0.61 -+ github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b - github.com/owen-d/BoomFilters v0.0.0-20230914145927-1ad00a0ec6fd - github.com/prometheus/alertmanager v0.26.0 - github.com/prometheus/common/sigv4 v0.1.0 -diff --git a/go.sum b/go.sum -index 92f64c0a3a729..9b4ead52ed30e 100644 ---- a/go.sum -+++ b/go.sum -@@ -478,6 +478,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 - github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= - github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= - github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -+github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= -+github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= - github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g= - github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= - github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= -@@ -1212,6 +1214,8 @@ github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48 h1:0WbZ+ - github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48/go.mod h1:aXdIdfn2OcGnMhOTojXmwZqXKgC3MU5riiNvzwwG9OY= - github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= - github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= -+github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68ZBRvtCjBi3QSosCIKrjmMbYlQMFAwVLds4= -+github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod 
h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= - github.com/influxdata/telegraf v1.16.3 h1:x0qeuSGGMg5y+YqP/5ZHwXZu3bcBrO8AAQOTNlYEb1c= - github.com/influxdata/telegraf v1.16.3/go.mod h1:fX/6k7qpIqzVPWyeIamb0wN5hbwc0ANUaTS80lPYFB8= - github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= -@@ -2235,7 +2239,10 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNq - golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= - golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= - gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -+gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -+gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= - gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -+gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= - gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= - gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= - google.golang.org/api v0.0.0-20180829000535-087779f1d2c9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go -index 667c838e05b5b..14d590cc26919 100644 ---- a/pkg/logproto/logproto.pb.go -+++ b/pkg/logproto/logproto.pb.go -@@ -1258,6 +1258,7 @@ func (m *LabelPair) GetValue() string { - } - - // LegacyLabelPair exists for backwards compatibility reasons and is deprecated. Do not use. -+// Use LabelPair instead. - type LegacyLabelPair struct { - Name []byte `protobuf:""bytes,1,opt,name=name,proto3"" json:""name,omitempty""` - Value []byte `protobuf:""bytes,2,opt,name=value,proto3"" json:""value,omitempty""` -diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto -index f80e102a2d9b7..53137c8de5a05 100644 ---- a/pkg/logproto/logproto.proto -+++ b/pkg/logproto/logproto.proto -@@ -213,6 +213,7 @@ message LabelPair { - } - - // LegacyLabelPair exists for backwards compatibility reasons and is deprecated. Do not use. -+// Use LabelPair instead. - message LegacyLabelPair { - bytes name = 1; - bytes value = 2; -diff --git a/pkg/logproto/sketch.pb.go b/pkg/logproto/sketch.pb.go -index d37bb0e5615fc..4a56552d984e8 100644 ---- a/pkg/logproto/sketch.pb.go -+++ b/pkg/logproto/sketch.pb.go -@@ -5,7 +5,9 @@ package logproto - - import ( - bytes ""bytes"" -+ encoding_binary ""encoding/binary"" - fmt ""fmt"" -+ _ ""github.com/gogo/protobuf/gogoproto"" - proto ""github.com/gogo/protobuf/proto"" - io ""io"" - math ""math"" -@@ -25,6 +27,355 @@ var _ = math.Inf - // proto package needs to be updated. 
- const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -+type QuantileSketchMatrix struct { -+ Values []*QuantileSketchVector `protobuf:""bytes,1,rep,name=values,proto3"" json:""values,omitempty""` -+} -+ -+func (m *QuantileSketchMatrix) Reset() { *m = QuantileSketchMatrix{} } -+func (*QuantileSketchMatrix) ProtoMessage() {} -+func (*QuantileSketchMatrix) Descriptor() ([]byte, []int) { -+ return fileDescriptor_7f9fd40e59b87ff3, []int{0} -+} -+func (m *QuantileSketchMatrix) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *QuantileSketchMatrix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_QuantileSketchMatrix.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *QuantileSketchMatrix) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_QuantileSketchMatrix.Merge(m, src) -+} -+func (m *QuantileSketchMatrix) XXX_Size() int { -+ return m.Size() -+} -+func (m *QuantileSketchMatrix) XXX_DiscardUnknown() { -+ xxx_messageInfo_QuantileSketchMatrix.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_QuantileSketchMatrix proto.InternalMessageInfo -+ -+func (m *QuantileSketchMatrix) GetValues() []*QuantileSketchVector { -+ if m != nil { -+ return m.Values -+ } -+ return nil -+} -+ -+type QuantileSketchVector struct { -+ Samples []*QuantileSketchSample `protobuf:""bytes,1,rep,name=samples,proto3"" json:""samples,omitempty""` -+} -+ -+func (m *QuantileSketchVector) Reset() { *m = QuantileSketchVector{} } -+func (*QuantileSketchVector) ProtoMessage() {} -+func (*QuantileSketchVector) Descriptor() ([]byte, []int) { -+ return fileDescriptor_7f9fd40e59b87ff3, []int{1} -+} -+func (m *QuantileSketchVector) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *QuantileSketchVector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_QuantileSketchVector.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *QuantileSketchVector) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_QuantileSketchVector.Merge(m, src) -+} -+func (m *QuantileSketchVector) XXX_Size() int { -+ return m.Size() -+} -+func (m *QuantileSketchVector) XXX_DiscardUnknown() { -+ xxx_messageInfo_QuantileSketchVector.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_QuantileSketchVector proto.InternalMessageInfo -+ -+func (m *QuantileSketchVector) GetSamples() []*QuantileSketchSample { -+ if m != nil { -+ return m.Samples -+ } -+ return nil -+} -+ -+type QuantileSketchSample struct { -+ F *QuantileSketch `protobuf:""bytes,1,opt,name=f,proto3"" json:""f,omitempty""` -+ TimestampMs int64 `protobuf:""varint,2,opt,name=timestamp_ms,json=timestampMs,proto3"" json:""timestamp_ms,omitempty""` -+ Metric []*LabelPair `protobuf:""bytes,3,rep,name=metric,proto3"" json:""metric,omitempty""` -+} -+ -+func (m *QuantileSketchSample) Reset() { *m = QuantileSketchSample{} } -+func (*QuantileSketchSample) ProtoMessage() {} -+func (*QuantileSketchSample) Descriptor() ([]byte, []int) { -+ return fileDescriptor_7f9fd40e59b87ff3, []int{2} -+} -+func (m *QuantileSketchSample) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *QuantileSketchSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if 
deterministic { -+ return xxx_messageInfo_QuantileSketchSample.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *QuantileSketchSample) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_QuantileSketchSample.Merge(m, src) -+} -+func (m *QuantileSketchSample) XXX_Size() int { -+ return m.Size() -+} -+func (m *QuantileSketchSample) XXX_DiscardUnknown() { -+ xxx_messageInfo_QuantileSketchSample.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_QuantileSketchSample proto.InternalMessageInfo -+ -+func (m *QuantileSketchSample) GetF() *QuantileSketch { -+ if m != nil { -+ return m.F -+ } -+ return nil -+} -+ -+func (m *QuantileSketchSample) GetTimestampMs() int64 { -+ if m != nil { -+ return m.TimestampMs -+ } -+ return 0 -+} -+ -+func (m *QuantileSketchSample) GetMetric() []*LabelPair { -+ if m != nil { -+ return m.Metric -+ } -+ return nil -+} -+ -+type QuantileSketch struct { -+ // Types that are valid to be assigned to Sketch: -+ // *QuantileSketch_Tdigest -+ // *QuantileSketch_Ddsketch -+ Sketch isQuantileSketch_Sketch `protobuf_oneof:""sketch""` -+} -+ -+func (m *QuantileSketch) Reset() { *m = QuantileSketch{} } -+func (*QuantileSketch) ProtoMessage() {} -+func (*QuantileSketch) Descriptor() ([]byte, []int) { -+ return fileDescriptor_7f9fd40e59b87ff3, []int{3} -+} -+func (m *QuantileSketch) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *QuantileSketch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_QuantileSketch.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *QuantileSketch) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_QuantileSketch.Merge(m, src) -+} -+func (m *QuantileSketch) XXX_Size() int { -+ return m.Size() -+} -+func (m *QuantileSketch) XXX_DiscardUnknown() { -+ xxx_messageInfo_QuantileSketch.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_QuantileSketch proto.InternalMessageInfo -+ -+type isQuantileSketch_Sketch interface { -+ isQuantileSketch_Sketch() -+ Equal(interface{}) bool -+ MarshalTo([]byte) (int, error) -+ Size() int -+} -+ -+type QuantileSketch_Tdigest struct { -+ Tdigest *TDigest `protobuf:""bytes,1,opt,name=tdigest,proto3,oneof""` -+} -+type QuantileSketch_Ddsketch struct { -+ Ddsketch []byte `protobuf:""bytes,2,opt,name=ddsketch,proto3,oneof""` -+} -+ -+func (*QuantileSketch_Tdigest) isQuantileSketch_Sketch() {} -+func (*QuantileSketch_Ddsketch) isQuantileSketch_Sketch() {} -+ -+func (m *QuantileSketch) GetSketch() isQuantileSketch_Sketch { -+ if m != nil { -+ return m.Sketch -+ } -+ return nil -+} -+ -+func (m *QuantileSketch) GetTdigest() *TDigest { -+ if x, ok := m.GetSketch().(*QuantileSketch_Tdigest); ok { -+ return x.Tdigest -+ } -+ return nil -+} -+ -+func (m *QuantileSketch) GetDdsketch() []byte { -+ if x, ok := m.GetSketch().(*QuantileSketch_Ddsketch); ok { -+ return x.Ddsketch -+ } -+ return nil -+} -+ -+// XXX_OneofWrappers is for the internal use of the proto package. 
-+func (*QuantileSketch) XXX_OneofWrappers() []interface{} { -+ return []interface{}{ -+ (*QuantileSketch_Tdigest)(nil), -+ (*QuantileSketch_Ddsketch)(nil), -+ } -+} -+ -+// ""Large"" bytes format from https://github.com/tdunning/t-digest -+type TDigest struct { -+ Min float64 `protobuf:""fixed64,1,opt,name=min,proto3"" json:""min,omitempty""` -+ Max float64 `protobuf:""fixed64,2,opt,name=max,proto3"" json:""max,omitempty""` -+ Compression float64 `protobuf:""fixed64,3,opt,name=compression,proto3"" json:""compression,omitempty""` -+ Processed []*TDigest_Centroid `protobuf:""bytes,4,rep,name=processed,proto3"" json:""processed,omitempty""` -+} -+ -+func (m *TDigest) Reset() { *m = TDigest{} } -+func (*TDigest) ProtoMessage() {} -+func (*TDigest) Descriptor() ([]byte, []int) { -+ return fileDescriptor_7f9fd40e59b87ff3, []int{4} -+} -+func (m *TDigest) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *TDigest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_TDigest.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *TDigest) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_TDigest.Merge(m, src) -+} -+func (m *TDigest) XXX_Size() int { -+ return m.Size() -+} -+func (m *TDigest) XXX_DiscardUnknown() { -+ xxx_messageInfo_TDigest.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_TDigest proto.InternalMessageInfo -+ -+func (m *TDigest) GetMin() float64 { -+ if m != nil { -+ return m.Min -+ } -+ return 0 -+} -+ -+func (m *TDigest) GetMax() float64 { -+ if m != nil { -+ return m.Max -+ } -+ return 0 -+} -+ -+func (m *TDigest) GetCompression() float64 { -+ if m != nil { -+ return m.Compression -+ } -+ return 0 -+} -+ -+func (m *TDigest) GetProcessed() []*TDigest_Centroid { -+ if m != nil { -+ return m.Processed -+ } -+ return nil -+} -+ -+type TDigest_Centroid struct { -+ Mean float64 `protobuf:""fixed64,1,opt,name=mean,proto3"" json:""mean,omitempty""` -+ Weight float64 `protobuf:""fixed64,2,opt,name=weight,proto3"" json:""weight,omitempty""` -+} -+ -+func (m *TDigest_Centroid) Reset() { *m = TDigest_Centroid{} } -+func (*TDigest_Centroid) ProtoMessage() {} -+func (*TDigest_Centroid) Descriptor() ([]byte, []int) { -+ return fileDescriptor_7f9fd40e59b87ff3, []int{4, 0} -+} -+func (m *TDigest_Centroid) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *TDigest_Centroid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_TDigest_Centroid.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *TDigest_Centroid) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_TDigest_Centroid.Merge(m, src) -+} -+func (m *TDigest_Centroid) XXX_Size() int { -+ return m.Size() -+} -+func (m *TDigest_Centroid) XXX_DiscardUnknown() { -+ xxx_messageInfo_TDigest_Centroid.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_TDigest_Centroid proto.InternalMessageInfo -+ -+func (m *TDigest_Centroid) GetMean() float64 { -+ if m != nil { -+ return m.Mean -+ } -+ return 0 -+} -+ -+func (m *TDigest_Centroid) GetWeight() float64 { -+ if m != nil { -+ return m.Weight -+ } -+ return 0 -+} -+ - type CountMinSketch struct { - Depth uint32 `protobuf:""varint,1,opt,name=depth,proto3"" json:""depth,omitempty""` - Width uint32 
`protobuf:""varint,2,opt,name=width,proto3"" json:""width,omitempty""` -@@ -35,7 +386,7 @@ type CountMinSketch struct { - func (m *CountMinSketch) Reset() { *m = CountMinSketch{} } - func (*CountMinSketch) ProtoMessage() {} - func (*CountMinSketch) Descriptor() ([]byte, []int) { -- return fileDescriptor_7f9fd40e59b87ff3, []int{0} -+ return fileDescriptor_7f9fd40e59b87ff3, []int{5} - } - func (m *CountMinSketch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -@@ -94,7 +445,7 @@ type TopK struct { - func (m *TopK) Reset() { *m = TopK{} } - func (*TopK) ProtoMessage() {} - func (*TopK) Descriptor() ([]byte, []int) { -- return fileDescriptor_7f9fd40e59b87ff3, []int{1} -+ return fileDescriptor_7f9fd40e59b87ff3, []int{6} - } - func (m *TopK) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -@@ -152,7 +503,7 @@ type TopK_Pair struct { - func (m *TopK_Pair) Reset() { *m = TopK_Pair{} } - func (*TopK_Pair) ProtoMessage() {} - func (*TopK_Pair) Descriptor() ([]byte, []int) { -- return fileDescriptor_7f9fd40e59b87ff3, []int{1, 0} -+ return fileDescriptor_7f9fd40e59b87ff3, []int{6, 0} - } - func (m *TopK_Pair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -@@ -202,7 +553,7 @@ type TopKMatrix struct { - func (m *TopKMatrix) Reset() { *m = TopKMatrix{} } - func (*TopKMatrix) ProtoMessage() {} - func (*TopKMatrix) Descriptor() ([]byte, []int) { -- return fileDescriptor_7f9fd40e59b87ff3, []int{2} -+ return fileDescriptor_7f9fd40e59b87ff3, []int{7} - } - func (m *TopKMatrix) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -@@ -246,7 +597,7 @@ type TopKMatrix_Vector struct { - func (m *TopKMatrix_Vector) Reset() { *m = TopKMatrix_Vector{} } - func (*TopKMatrix_Vector) ProtoMessage() {} - func (*TopKMatrix_Vector) Descriptor() ([]byte, []int) { -- return fileDescriptor_7f9fd40e59b87ff3, []int{2, 0} -+ return fileDescriptor_7f9fd40e59b87ff3, []int{7, 0} - } - func (m *TopKMatrix_Vector) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -@@ -290,6 +641,12 @@ func (m *TopKMatrix_Vector) GetTimestampMs() int64 { - } - - func init() { -+ proto.RegisterType((*QuantileSketchMatrix)(nil), ""logproto.QuantileSketchMatrix"") -+ proto.RegisterType((*QuantileSketchVector)(nil), ""logproto.QuantileSketchVector"") -+ proto.RegisterType((*QuantileSketchSample)(nil), ""logproto.QuantileSketchSample"") -+ proto.RegisterType((*QuantileSketch)(nil), ""logproto.QuantileSketch"") -+ proto.RegisterType((*TDigest)(nil), ""logproto.TDigest"") -+ proto.RegisterType((*TDigest_Centroid)(nil), ""logproto.TDigest.Centroid"") - proto.RegisterType((*CountMinSketch)(nil), ""logproto.CountMinSketch"") - proto.RegisterType((*TopK)(nil), ""logproto.TopK"") - proto.RegisterType((*TopK_Pair)(nil), ""logproto.TopK.Pair"") -@@ -300,42 +657,57 @@ func init() { - func init() { proto.RegisterFile(""pkg/logproto/sketch.proto"", fileDescriptor_7f9fd40e59b87ff3) } - - var fileDescriptor_7f9fd40e59b87ff3 = []byte{ -- // 391 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xda, 0x30, -- 0x1c, 0xc7, 0x63, 0x92, 0x21, 0xe6, 0x00, 0x07, 0x6f, 0x87, 0x8c, 0x49, 0x56, 0x16, 0x4d, 0x5a, -- 0xb4, 0x43, 0x22, 0xc1, 0x1b, 0x6c, 0xc7, 0x09, 0x6d, 0xf2, 0xaa, 0xaa, 0xe2, 0x52, 0x85, 0xe0, -- 0x26, 0x56, 0xfe, 0x38, 0x4a, 0x0c, 0x6d, 0x6f, 0x7d, 0x84, 0xaa, 0x4f, 0xd1, 0x6b, 0xdf, 0xa2, -- 0x47, 0x8e, 0x1c, 0x4b, 0xb8, 0xf4, 0xc8, 0x23, 0x54, 0x76, 0x80, 0xc2, 0x29, 0xfa, 0x7e, 0xf3, -- 0xb1, 0x7f, 0x9f, 0x9f, 0x64, 0xf8, 0xa5, 0x48, 0x22, 0x3f, 
0xe5, 0x51, 0x51, 0x72, 0xc1, 0xfd, -- 0x2a, 0xa1, 0x22, 0x8c, 0x3d, 0x15, 0x50, 0x67, 0x5f, 0x3b, 0x17, 0xb0, 0xff, 0x9b, 0xcf, 0x73, -- 0x31, 0x66, 0xf9, 0x7f, 0x45, 0xa0, 0xcf, 0xf0, 0xc3, 0x8c, 0x16, 0x22, 0xb6, 0x80, 0x0d, 0xdc, -- 0x1e, 0x69, 0x82, 0x6c, 0xaf, 0xd9, 0x4c, 0xc4, 0x56, 0xab, 0x69, 0x55, 0x40, 0x03, 0xd8, 0x09, -- 0xe5, 0x69, 0x5a, 0x56, 0x96, 0x6e, 0xeb, 0x6e, 0x8f, 0x1c, 0xb2, 0xf3, 0x04, 0xa0, 0x71, 0xc6, -- 0x8b, 0x3f, 0xe8, 0x27, 0xd4, 0xc3, 0xac, 0x52, 0xd7, 0x99, 0x43, 0xcb, 0xdb, 0x8f, 0xf6, 0x4e, -- 0xe7, 0x12, 0x09, 0xa1, 0x1f, 0xd0, 0x48, 0x59, 0x25, 0xac, 0x96, 0xad, 0xbb, 0xe6, 0xf0, 0xd3, -- 0x3b, 0x2c, 0x6f, 0xf2, 0xfe, 0x05, 0xac, 0x24, 0x0a, 0x40, 0x36, 0x34, 0xe3, 0xdb, 0x82, 0x96, -- 0x29, 0x8f, 0x52, 0x1e, 0x59, 0xba, 0x0d, 0xdc, 0x2e, 0x39, 0xae, 0x06, 0x43, 0x68, 0x48, 0x5e, -- 0x9a, 0xd3, 0x05, 0xcd, 0x85, 0x12, 0xf8, 0x48, 0x9a, 0x20, 0x5b, 0x65, 0xba, 0xdf, 0x47, 0x05, -- 0xe7, 0x01, 0x40, 0x28, 0x27, 0x8d, 0x03, 0x51, 0xb2, 0x1b, 0x34, 0x82, 0xed, 0x45, 0x90, 0xce, -- 0xa9, 0x94, 0x97, 0x3e, 0x5f, 0x4f, 0x7d, 0x1a, 0xca, 0x3b, 0xa7, 0xa1, 0xe0, 0x25, 0xd9, 0xa1, -- 0x83, 0xbf, 0xb0, 0xdd, 0x34, 0xc8, 0x81, 0x86, 0xe0, 0x45, 0xb2, 0xdb, 0xbc, 0x7f, 0x7a, 0x98, -- 0xa8, 0x7f, 0xe8, 0x1b, 0xec, 0x0a, 0x96, 0xd1, 0x4a, 0x04, 0x59, 0x71, 0x99, 0x55, 0x4a, 0x47, -- 0x27, 0xe6, 0xa1, 0x1b, 0x57, 0xbf, 0x26, 0xcb, 0x35, 0xd6, 0x56, 0x6b, 0xac, 0x6d, 0xd7, 0x18, -- 0xdc, 0xd5, 0x18, 0x3c, 0xd6, 0x18, 0x3c, 0xd7, 0x18, 0x2c, 0x6b, 0x0c, 0x5e, 0x6a, 0x0c, 0x5e, -- 0x6b, 0xac, 0x6d, 0x6b, 0x0c, 0xee, 0x37, 0x58, 0x5b, 0x6e, 0xb0, 0xb6, 0xda, 0x60, 0x6d, 0xf2, -- 0x3d, 0x62, 0x22, 0x9e, 0x4f, 0xbd, 0x90, 0x67, 0x7e, 0x54, 0x06, 0x57, 0x41, 0x1e, 0xf8, 0x29, -- 0x4f, 0x98, 0x7f, 0xfc, 0x2a, 0xa6, 0x6d, 0xf5, 0x19, 0xbd, 0x05, 0x00, 0x00, 0xff, 0xff, 0x4c, -- 0x14, 0x1f, 0xbe, 0x2c, 0x02, 0x00, 0x00, --} -- --func (this *CountMinSketch) Equal(that interface{}) bool { -+ // 632 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xd4, 0x4e, -+ 0x14, 0xef, 0xfc, 0x77, 0xff, 0xcb, 0xf2, 0x16, 0x88, 0x8e, 0xc4, 0xac, 0xc5, 0x4c, 0xd6, 0xc6, -+ 0x28, 0xd1, 0xb8, 0x9b, 0x40, 0x42, 0x38, 0x83, 0x07, 0x12, 0x45, 0x71, 0x20, 0xc6, 0x70, 0x31, -+ 0xa5, 0x1d, 0xba, 0x93, 0x6d, 0x3b, 0x4d, 0x67, 0x16, 0xf0, 0xe6, 0x27, 0x30, 0xc6, 0x4f, 0xe1, -+ 0xd5, 0x8f, 0xe0, 0xcd, 0x23, 0x47, 0x8e, 0x52, 0x2e, 0x1e, 0xf9, 0x08, 0x66, 0x66, 0xda, 0x85, -+ 0x2e, 0x31, 0x7a, 0xda, 0xf7, 0x7e, 0xef, 0xf7, 0x7e, 0xf3, 0x9b, 0x79, 0x7d, 0x0b, 0xf7, 0xb2, -+ 0x51, 0x34, 0x88, 0x45, 0x94, 0xe5, 0x42, 0x89, 0x81, 0x1c, 0x31, 0x15, 0x0c, 0xfb, 0x26, 0xc1, -+ 0xed, 0x0a, 0x76, 0x17, 0x23, 0x11, 0x09, 0xcb, 0xd0, 0x91, 0xad, 0xbb, 0x4b, 0xb5, 0xd6, 0x2a, -+ 0xb0, 0x45, 0xef, 0x15, 0x2c, 0xbe, 0x19, 0xfb, 0xa9, 0xe2, 0x31, 0xdb, 0x35, 0xa2, 0xdb, 0xbe, -+ 0xca, 0xf9, 0x09, 0x5e, 0x83, 0xd6, 0x91, 0x1f, 0x8f, 0x99, 0xec, 0xa2, 0x5e, 0x63, 0xb9, 0xb3, -+ 0x42, 0xfa, 0x93, 0xc6, 0x3a, 0xff, 0x2d, 0x0b, 0x94, 0xc8, 0x69, 0xc9, 0xf6, 0x76, 0xa6, 0xf5, -+ 0x6c, 0x1d, 0xaf, 0xc3, 0x8c, 0xf4, 0x93, 0x2c, 0xfe, 0xbb, 0xe0, 0xae, 0xa1, 0xd1, 0x8a, 0xee, -+ 0x7d, 0x42, 0xd3, 0x92, 0x96, 0x81, 0x1f, 0x01, 0x3a, 0xec, 0xa2, 0x1e, 0x5a, 0xee, 0xac, 0x74, -+ 0xff, 0x24, 0x46, 0xd1, 0x21, 0x7e, 0x00, 0x73, 0x8a, 0x27, 0x4c, 0x2a, 0x3f, 0xc9, 0xde, 0x27, -+ 0xb2, 0xfb, 0x5f, 0x0f, 0x2d, 0x37, 0x68, 0x67, 0x82, 0x6d, 0x4b, 0xfc, 0x14, 0x5a, 0x09, 0x53, -+ 0x39, 0x0f, 0xba, 0x0d, 0x63, 0xee, 0xce, 0x95, 0xde, 0x4b, 0xff, 0x80, 0xc5, 0x3b, 
0x3e, 0xcf, -+ 0x69, 0x49, 0xf1, 0x22, 0x58, 0xa8, 0x1f, 0x82, 0x9f, 0xc1, 0x8c, 0x0a, 0x79, 0xc4, 0xa4, 0x2a, -+ 0xfd, 0xdc, 0xbe, 0xea, 0xdf, 0x7b, 0x6e, 0x0a, 0x5b, 0x0e, 0xad, 0x38, 0xf8, 0x3e, 0xb4, 0xc3, -+ 0xd0, 0x8e, 0xd0, 0x98, 0x99, 0xdb, 0x72, 0xe8, 0x04, 0xd9, 0x68, 0x43, 0xcb, 0x46, 0xde, 0x77, -+ 0x04, 0x33, 0x65, 0x3b, 0xbe, 0x05, 0x8d, 0x84, 0xa7, 0x46, 0x1e, 0x51, 0x1d, 0x1a, 0xc4, 0x3f, -+ 0x31, 0x02, 0x1a, 0xf1, 0x4f, 0x70, 0x0f, 0x3a, 0x81, 0x48, 0xb2, 0x9c, 0x49, 0xc9, 0x45, 0xda, -+ 0x6d, 0x98, 0xca, 0x75, 0x08, 0xaf, 0xc3, 0x6c, 0x96, 0x8b, 0x80, 0x49, 0xc9, 0xc2, 0x6e, 0xd3, -+ 0x5c, 0xd5, 0xbd, 0x61, 0xb5, 0xbf, 0xc9, 0x52, 0x95, 0x0b, 0x1e, 0xd2, 0x2b, 0xb2, 0xbb, 0x06, -+ 0xed, 0x0a, 0xc6, 0x18, 0x9a, 0x09, 0xf3, 0x2b, 0x33, 0x26, 0xc6, 0x77, 0xa1, 0x75, 0xcc, 0x78, -+ 0x34, 0x54, 0xa5, 0xa1, 0x32, 0xf3, 0xde, 0xc1, 0xc2, 0xa6, 0x18, 0xa7, 0x6a, 0x9b, 0xa7, 0xe5, -+ 0x63, 0x2d, 0xc2, 0xff, 0x21, 0xcb, 0xd4, 0xd0, 0xb4, 0xcf, 0x53, 0x9b, 0x68, 0xf4, 0x98, 0x87, -+ 0xca, 0x3e, 0xc8, 0x3c, 0xb5, 0x09, 0x76, 0xa1, 0x1d, 0xe8, 0x6e, 0x96, 0x4b, 0x33, 0x99, 0x79, -+ 0x3a, 0xc9, 0xbd, 0x6f, 0x08, 0x9a, 0x7b, 0x22, 0x7b, 0x81, 0x9f, 0x40, 0x23, 0x48, 0xe4, 0xcd, -+ 0x2f, 0xa1, 0x7e, 0x2e, 0xd5, 0x24, 0xfc, 0x18, 0x9a, 0x31, 0x97, 0xda, 0xe4, 0xd4, 0x98, 0xb5, -+ 0x52, 0xdf, 0x8c, 0xd9, 0x10, 0xf4, 0x5b, 0x0e, 0x3f, 0x64, 0x2c, 0x8f, 0x45, 0x14, 0x8b, 0xc8, -+ 0xbc, 0xe5, 0x1c, 0xbd, 0x0e, 0xb9, 0x2b, 0xd0, 0xd4, 0x7c, 0xed, 0x9c, 0x1d, 0xb1, 0xd4, 0x8e, -+ 0x7e, 0x96, 0xda, 0x44, 0xa3, 0xc6, 0x69, 0x75, 0x1f, 0x93, 0x78, 0x5f, 0x10, 0x80, 0x3e, 0xa9, -+ 0x5c, 0xb2, 0xd5, 0xa9, 0x25, 0x5b, 0xaa, 0xfb, 0xb1, 0xac, 0x7e, 0x7d, 0xc3, 0xdc, 0xd7, 0xd0, -+ 0x2a, 0x77, 0xca, 0x83, 0xa6, 0x12, 0xd9, 0xa8, 0xbc, 0xf9, 0x42, 0xbd, 0x99, 0x9a, 0xda, 0x3f, -+ 0x7c, 0xfc, 0x1b, 0xfb, 0xa7, 0xe7, 0xc4, 0x39, 0x3b, 0x27, 0xce, 0xe5, 0x39, 0x41, 0x1f, 0x0b, -+ 0x82, 0xbe, 0x16, 0x04, 0xfd, 0x28, 0x08, 0x3a, 0x2d, 0x08, 0xfa, 0x59, 0x10, 0xf4, 0xab, 0x20, -+ 0xce, 0x65, 0x41, 0xd0, 0xe7, 0x0b, 0xe2, 0x9c, 0x5e, 0x10, 0xe7, 0xec, 0x82, 0x38, 0xfb, 0x0f, -+ 0x23, 0xae, 0x86, 0xe3, 0x83, 0x7e, 0x20, 0x92, 0x41, 0x94, 0xfb, 0x87, 0x7e, 0xea, 0x0f, 0x62, -+ 0x31, 0xe2, 0x83, 0xeb, 0xff, 0x36, 0x07, 0x2d, 0xf3, 0xb3, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, -+ 0xa9, 0x7c, 0xb5, 0x30, 0xbf, 0x04, 0x00, 0x00, -+} -+ -+func (this *QuantileSketchMatrix) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - -- that1, ok := that.(*CountMinSketch) -+ that1, ok := that.(*QuantileSketchMatrix) - if !ok { -- that2, ok := that.(CountMinSketch) -+ that2, ok := that.(QuantileSketchMatrix) - if ok { - that1 = &that2 - } else { -@@ -347,30 +719,24 @@ func (this *CountMinSketch) Equal(that interface{}) bool { - } else if this == nil { - return false - } -- if this.Depth != that1.Depth { -- return false -- } -- if this.Width != that1.Width { -- return false -- } -- if len(this.Counters) != len(that1.Counters) { -+ if len(this.Values) != len(that1.Values) { - return false - } -- for i := range this.Counters { -- if this.Counters[i] != that1.Counters[i] { -+ for i := range this.Values { -+ if !this.Values[i].Equal(that1.Values[i]) { - return false - } - } - return true - } --func (this *TopK) Equal(that interface{}) bool { -+func (this *QuantileSketchVector) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - -- that1, ok := that.(*TopK) -+ that1, ok := that.(*QuantileSketchVector) - if !ok { -- that2, ok := that.(TopK) -+ that2, ok := that.(QuantileSketchVector) - if 
ok { - that1 = &that2 - } else { -@@ -382,30 +748,24 @@ func (this *TopK) Equal(that interface{}) bool { - } else if this == nil { - return false - } -- if !this.Cms.Equal(that1.Cms) { -- return false -- } -- if len(this.List) != len(that1.List) { -+ if len(this.Samples) != len(that1.Samples) { - return false - } -- for i := range this.List { -- if !this.List[i].Equal(that1.List[i]) { -+ for i := range this.Samples { -+ if !this.Samples[i].Equal(that1.Samples[i]) { - return false - } - } -- if !bytes.Equal(this.Hyperloglog, that1.Hyperloglog) { -- return false -- } - return true - } --func (this *TopK_Pair) Equal(that interface{}) bool { -+func (this *QuantileSketchSample) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - -- that1, ok := that.(*TopK_Pair) -+ that1, ok := that.(*QuantileSketchSample) - if !ok { -- that2, ok := that.(TopK_Pair) -+ that2, ok := that.(QuantileSketchSample) - if ok { - that1 = &that2 - } else { -@@ -417,22 +777,30 @@ func (this *TopK_Pair) Equal(that interface{}) bool { - } else if this == nil { - return false - } -- if this.Event != that1.Event { -+ if !this.F.Equal(that1.F) { - return false - } -- if this.Count != that1.Count { -+ if this.TimestampMs != that1.TimestampMs { -+ return false -+ } -+ if len(this.Metric) != len(that1.Metric) { - return false - } -+ for i := range this.Metric { -+ if !this.Metric[i].Equal(that1.Metric[i]) { -+ return false -+ } -+ } - return true - } --func (this *TopKMatrix) Equal(that interface{}) bool { -+func (this *QuantileSketch) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - -- that1, ok := that.(*TopKMatrix) -+ that1, ok := that.(*QuantileSketch) - if !ok { -- that2, ok := that.(TopKMatrix) -+ that2, ok := that.(QuantileSketch) - if ok { - that1 = &that2 - } else { -@@ -444,8 +812,248 @@ func (this *TopKMatrix) Equal(that interface{}) bool { - } else if this == nil { - return false - } -- if len(this.Values) != len(that1.Values) { -- return false -+ if that1.Sketch == nil { -+ if this.Sketch != nil { -+ return false -+ } -+ } else if this.Sketch == nil { -+ return false -+ } else if !this.Sketch.Equal(that1.Sketch) { -+ return false -+ } -+ return true -+} -+func (this *QuantileSketch_Tdigest) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*QuantileSketch_Tdigest) -+ if !ok { -+ that2, ok := that.(QuantileSketch_Tdigest) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if !this.Tdigest.Equal(that1.Tdigest) { -+ return false -+ } -+ return true -+} -+func (this *QuantileSketch_Ddsketch) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*QuantileSketch_Ddsketch) -+ if !ok { -+ that2, ok := that.(QuantileSketch_Ddsketch) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if !bytes.Equal(this.Ddsketch, that1.Ddsketch) { -+ return false -+ } -+ return true -+} -+func (this *TDigest) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*TDigest) -+ if !ok { -+ that2, ok := that.(TDigest) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if this.Min != that1.Min { -+ return false -+ } -+ if 
this.Max != that1.Max { -+ return false -+ } -+ if this.Compression != that1.Compression { -+ return false -+ } -+ if len(this.Processed) != len(that1.Processed) { -+ return false -+ } -+ for i := range this.Processed { -+ if !this.Processed[i].Equal(that1.Processed[i]) { -+ return false -+ } -+ } -+ return true -+} -+func (this *TDigest_Centroid) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*TDigest_Centroid) -+ if !ok { -+ that2, ok := that.(TDigest_Centroid) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if this.Mean != that1.Mean { -+ return false -+ } -+ if this.Weight != that1.Weight { -+ return false -+ } -+ return true -+} -+func (this *CountMinSketch) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*CountMinSketch) -+ if !ok { -+ that2, ok := that.(CountMinSketch) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if this.Depth != that1.Depth { -+ return false -+ } -+ if this.Width != that1.Width { -+ return false -+ } -+ if len(this.Counters) != len(that1.Counters) { -+ return false -+ } -+ for i := range this.Counters { -+ if this.Counters[i] != that1.Counters[i] { -+ return false -+ } -+ } -+ return true -+} -+func (this *TopK) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*TopK) -+ if !ok { -+ that2, ok := that.(TopK) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if !this.Cms.Equal(that1.Cms) { -+ return false -+ } -+ if len(this.List) != len(that1.List) { -+ return false -+ } -+ for i := range this.List { -+ if !this.List[i].Equal(that1.List[i]) { -+ return false -+ } -+ } -+ if !bytes.Equal(this.Hyperloglog, that1.Hyperloglog) { -+ return false -+ } -+ return true -+} -+func (this *TopK_Pair) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*TopK_Pair) -+ if !ok { -+ that2, ok := that.(TopK_Pair) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if this.Event != that1.Event { -+ return false -+ } -+ if this.Count != that1.Count { -+ return false -+ } -+ return true -+} -+func (this *TopKMatrix) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*TopKMatrix) -+ if !ok { -+ that2, ok := that.(TopKMatrix) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if len(this.Values) != len(that1.Values) { -+ return false - } - for i := range this.Values { - if !this.Values[i].Equal(that1.Values[i]) { -@@ -481,65 +1089,159 @@ func (this *TopKMatrix_Vector) Equal(that interface{}) bool { - } - return true - } --func (this *CountMinSketch) GoString() string { -+func (this *QuantileSketchMatrix) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 7) -- s = append(s, ""&logproto.CountMinSketch{"") -- s = append(s, ""Depth: ""+fmt.Sprintf(""%#v"", this.Depth)+"",\n"") -- s = append(s, ""Width: ""+fmt.Sprintf(""%#v"", this.Width)+"",\n"") 
-- s = append(s, ""Counters: ""+fmt.Sprintf(""%#v"", this.Counters)+"",\n"") -+ s := make([]string, 0, 5) -+ s = append(s, ""&logproto.QuantileSketchMatrix{"") -+ if this.Values != nil { -+ s = append(s, ""Values: ""+fmt.Sprintf(""%#v"", this.Values)+"",\n"") -+ } - s = append(s, ""}"") - return strings.Join(s, """") - } --func (this *TopK) GoString() string { -+func (this *QuantileSketchVector) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 7) -- s = append(s, ""&logproto.TopK{"") -- if this.Cms != nil { -- s = append(s, ""Cms: ""+fmt.Sprintf(""%#v"", this.Cms)+"",\n"") -- } -- if this.List != nil { -- s = append(s, ""List: ""+fmt.Sprintf(""%#v"", this.List)+"",\n"") -+ s := make([]string, 0, 5) -+ s = append(s, ""&logproto.QuantileSketchVector{"") -+ if this.Samples != nil { -+ s = append(s, ""Samples: ""+fmt.Sprintf(""%#v"", this.Samples)+"",\n"") - } -- s = append(s, ""Hyperloglog: ""+fmt.Sprintf(""%#v"", this.Hyperloglog)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } --func (this *TopK_Pair) GoString() string { -+func (this *QuantileSketchSample) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 6) -- s = append(s, ""&logproto.TopK_Pair{"") -- s = append(s, ""Event: ""+fmt.Sprintf(""%#v"", this.Event)+"",\n"") -- s = append(s, ""Count: ""+fmt.Sprintf(""%#v"", this.Count)+"",\n"") -+ s := make([]string, 0, 7) -+ s = append(s, ""&logproto.QuantileSketchSample{"") -+ if this.F != nil { -+ s = append(s, ""F: ""+fmt.Sprintf(""%#v"", this.F)+"",\n"") -+ } -+ s = append(s, ""TimestampMs: ""+fmt.Sprintf(""%#v"", this.TimestampMs)+"",\n"") -+ if this.Metric != nil { -+ s = append(s, ""Metric: ""+fmt.Sprintf(""%#v"", this.Metric)+"",\n"") -+ } - s = append(s, ""}"") - return strings.Join(s, """") - } --func (this *TopKMatrix) GoString() string { -+func (this *QuantileSketch) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 5) -- s = append(s, ""&logproto.TopKMatrix{"") -- if this.Values != nil { -- s = append(s, ""Values: ""+fmt.Sprintf(""%#v"", this.Values)+"",\n"") -+ s := make([]string, 0, 6) -+ s = append(s, ""&logproto.QuantileSketch{"") -+ if this.Sketch != nil { -+ s = append(s, ""Sketch: ""+fmt.Sprintf(""%#v"", this.Sketch)+"",\n"") - } - s = append(s, ""}"") - return strings.Join(s, """") - } --func (this *TopKMatrix_Vector) GoString() string { -+func (this *QuantileSketch_Tdigest) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 6) -- s = append(s, ""&logproto.TopKMatrix_Vector{"") -- if this.Topk != nil { -- s = append(s, ""Topk: ""+fmt.Sprintf(""%#v"", this.Topk)+"",\n"") -+ s := strings.Join([]string{`&logproto.QuantileSketch_Tdigest{` + -+ `Tdigest:` + fmt.Sprintf(""%#v"", this.Tdigest) + `}`}, "", "") -+ return s -+} -+func (this *QuantileSketch_Ddsketch) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&logproto.QuantileSketch_Ddsketch{` + -+ `Ddsketch:` + fmt.Sprintf(""%#v"", this.Ddsketch) + `}`}, "", "") -+ return s -+} -+func (this *TDigest) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 8) -+ s = append(s, ""&logproto.TDigest{"") -+ s = append(s, ""Min: ""+fmt.Sprintf(""%#v"", this.Min)+"",\n"") -+ s = append(s, ""Max: ""+fmt.Sprintf(""%#v"", this.Max)+"",\n"") -+ s = append(s, ""Compression: ""+fmt.Sprintf(""%#v"", this.Compression)+"",\n"") -+ if this.Processed != nil { -+ s = append(s, ""Processed: 
""+fmt.Sprintf(""%#v"", this.Processed)+"",\n"") -+ } -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *TDigest_Centroid) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&logproto.TDigest_Centroid{"") -+ s = append(s, ""Mean: ""+fmt.Sprintf(""%#v"", this.Mean)+"",\n"") -+ s = append(s, ""Weight: ""+fmt.Sprintf(""%#v"", this.Weight)+"",\n"") -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *CountMinSketch) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 7) -+ s = append(s, ""&logproto.CountMinSketch{"") -+ s = append(s, ""Depth: ""+fmt.Sprintf(""%#v"", this.Depth)+"",\n"") -+ s = append(s, ""Width: ""+fmt.Sprintf(""%#v"", this.Width)+"",\n"") -+ s = append(s, ""Counters: ""+fmt.Sprintf(""%#v"", this.Counters)+"",\n"") -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *TopK) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 7) -+ s = append(s, ""&logproto.TopK{"") -+ if this.Cms != nil { -+ s = append(s, ""Cms: ""+fmt.Sprintf(""%#v"", this.Cms)+"",\n"") -+ } -+ if this.List != nil { -+ s = append(s, ""List: ""+fmt.Sprintf(""%#v"", this.List)+"",\n"") -+ } -+ s = append(s, ""Hyperloglog: ""+fmt.Sprintf(""%#v"", this.Hyperloglog)+"",\n"") -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *TopK_Pair) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&logproto.TopK_Pair{"") -+ s = append(s, ""Event: ""+fmt.Sprintf(""%#v"", this.Event)+"",\n"") -+ s = append(s, ""Count: ""+fmt.Sprintf(""%#v"", this.Count)+"",\n"") -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *TopKMatrix) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 5) -+ s = append(s, ""&logproto.TopKMatrix{"") -+ if this.Values != nil { -+ s = append(s, ""Values: ""+fmt.Sprintf(""%#v"", this.Values)+"",\n"") -+ } -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *TopKMatrix_Vector) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&logproto.TopKMatrix_Vector{"") -+ if this.Topk != nil { -+ s = append(s, ""Topk: ""+fmt.Sprintf(""%#v"", this.Topk)+"",\n"") - } - s = append(s, ""TimestampMs: ""+fmt.Sprintf(""%#v"", this.TimestampMs)+"",\n"") - s = append(s, ""}"") -@@ -553,7 +1255,7 @@ func valueToGoStringSketch(v interface{}, typ string) string { - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf(""func(v %v) *%v { return &v } ( %#v )"", typ, typ, pv) - } --func (m *CountMinSketch) Marshal() (dAtA []byte, err error) { -+func (m *QuantileSketchMatrix) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) -@@ -563,48 +1265,71 @@ func (m *CountMinSketch) Marshal() (dAtA []byte, err error) { - return dAtA[:n], nil - } - --func (m *CountMinSketch) MarshalTo(dAtA []byte) (int, error) { -+func (m *QuantileSketchMatrix) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) - } - --func (m *CountMinSketch) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+func (m *QuantileSketchMatrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l -- if len(m.Counters) > 0 { -- dAtA2 := make([]byte, len(m.Counters)*10) -- var j1 int -- for _, 
num := range m.Counters { -- for num >= 1<<7 { -- dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) -- num >>= 7 -- j1++ -+ if len(m.Values) > 0 { -+ for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintSketch(dAtA, i, uint64(size)) - } -- dAtA2[j1] = uint8(num) -- j1++ -+ i-- -+ dAtA[i] = 0xa - } -- i -= j1 -- copy(dAtA[i:], dAtA2[:j1]) -- i = encodeVarintSketch(dAtA, i, uint64(j1)) -- i-- -- dAtA[i] = 0x1a - } -- if m.Width != 0 { -- i = encodeVarintSketch(dAtA, i, uint64(m.Width)) -- i-- -- dAtA[i] = 0x10 -+ return len(dAtA) - i, nil -+} -+ -+func (m *QuantileSketchVector) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err - } -- if m.Depth != 0 { -- i = encodeVarintSketch(dAtA, i, uint64(m.Depth)) -- i-- -- dAtA[i] = 0x8 -+ return dAtA[:n], nil -+} -+ -+func (m *QuantileSketchVector) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *QuantileSketchVector) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i -+ var l int -+ _ = l -+ if len(m.Samples) > 0 { -+ for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintSketch(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0xa -+ } - } - return len(dAtA) - i, nil - } - --func (m *TopK) Marshal() (dAtA []byte, err error) { -+func (m *QuantileSketchSample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) -@@ -614,27 +1339,20 @@ func (m *TopK) Marshal() (dAtA []byte, err error) { - return dAtA[:n], nil - } - --func (m *TopK) MarshalTo(dAtA []byte) (int, error) { -+func (m *QuantileSketchSample) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) - } - --func (m *TopK) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+func (m *QuantileSketchSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l -- if len(m.Hyperloglog) > 0 { -- i -= len(m.Hyperloglog) -- copy(dAtA[i:], m.Hyperloglog) -- i = encodeVarintSketch(dAtA, i, uint64(len(m.Hyperloglog))) -- i-- -- dAtA[i] = 0x1a -- } -- if len(m.List) > 0 { -- for iNdEx := len(m.List) - 1; iNdEx >= 0; iNdEx-- { -+ if len(m.Metric) > 0 { -+ for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { - { -- size, err := m.List[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } -@@ -642,12 +1360,17 @@ func (m *TopK) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i = encodeVarintSketch(dAtA, i, uint64(size)) - } - i-- -- dAtA[i] = 0x12 -+ dAtA[i] = 0x1a - } - } -- if m.Cms != nil { -+ if m.TimestampMs != 0 { -+ i = encodeVarintSketch(dAtA, i, uint64(m.TimestampMs)) -+ i-- -+ dAtA[i] = 0x10 -+ } -+ if m.F != nil { - { -- size, err := m.Cms.MarshalToSizedBuffer(dAtA[:i]) -+ size, err := m.F.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } -@@ -660,7 +1383,7 @@ func (m *TopK) MarshalToSizedBuffer(dAtA []byte) (int, error) { - return len(dAtA) - i, nil - } - --func (m *TopK_Pair) Marshal() (dAtA []byte, err error) { -+func (m *QuantileSketch) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) -@@ -670,32 +1393,64 @@ func (m *TopK_Pair) Marshal() (dAtA []byte, err error) { - return dAtA[:n], nil - } - --func (m *TopK_Pair) MarshalTo(dAtA []byte) (int, error) { -+func (m *QuantileSketch) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) - } - --func (m *TopK_Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+func (m *QuantileSketch) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l -- if m.Count != 0 { -- i = encodeVarintSketch(dAtA, i, uint64(m.Count)) -- i-- -- dAtA[i] = 0x10 -+ if m.Sketch != nil { -+ { -+ size := m.Sketch.Size() -+ i -= size -+ if _, err := m.Sketch.MarshalTo(dAtA[i:]); err != nil { -+ return 0, err -+ } -+ } - } -- if len(m.Event) > 0 { -- i -= len(m.Event) -- copy(dAtA[i:], m.Event) -- i = encodeVarintSketch(dAtA, i, uint64(len(m.Event))) -+ return len(dAtA) - i, nil -+} -+ -+func (m *QuantileSketch_Tdigest) MarshalTo(dAtA []byte) (int, error) { -+ return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -+} -+ -+func (m *QuantileSketch_Tdigest) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ if m.Tdigest != nil { -+ { -+ size, err := m.Tdigest.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintSketch(dAtA, i, uint64(size)) -+ } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil - } -+func (m *QuantileSketch_Ddsketch) MarshalTo(dAtA []byte) (int, error) { -+ return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -+} - --func (m *TopKMatrix) Marshal() (dAtA []byte, err error) { -+func (m *QuantileSketch_Ddsketch) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ if m.Ddsketch != nil { -+ i -= len(m.Ddsketch) -+ copy(dAtA[i:], m.Ddsketch) -+ i = encodeVarintSketch(dAtA, i, uint64(len(m.Ddsketch))) -+ i-- -+ dAtA[i] = 0x12 -+ } -+ return len(dAtA) - i, nil -+} -+func (m *TDigest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) -@@ -705,20 +1460,20 @@ func (m *TopKMatrix) Marshal() (dAtA []byte, err error) { - return dAtA[:n], nil - } - --func (m *TopKMatrix) MarshalTo(dAtA []byte) (int, error) { -+func (m *TDigest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) - } - --func (m *TopKMatrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+func (m *TDigest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l -- if len(m.Values) > 0 { -- for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { -+ if len(m.Processed) > 0 { -+ for iNdEx := len(m.Processed) - 1; iNdEx >= 0; iNdEx-- { - { -- size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ size, err := m.Processed[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } -@@ -726,13 +1481,31 @@ func (m *TopKMatrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i = encodeVarintSketch(dAtA, i, uint64(size)) - } - i-- -- dAtA[i] = 0xa -+ dAtA[i] = 0x22 - } - } -+ if m.Compression != 0 { -+ i -= 8 -+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Compression)))) -+ i-- -+ dAtA[i] = 0x19 -+ } -+ if m.Max != 0 { -+ i -= 8 -+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max)))) -+ i-- -+ dAtA[i] = 0x11 -+ } -+ if m.Min != 0 { -+ i 
-= 8 -+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min)))) -+ i-- -+ dAtA[i] = 0x9 -+ } - return len(dAtA) - i, nil - } - --func (m *TopKMatrix_Vector) Marshal() (dAtA []byte, err error) { -+func (m *TDigest_Centroid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) -@@ -742,109 +1515,262 @@ func (m *TopKMatrix_Vector) Marshal() (dAtA []byte, err error) { - return dAtA[:n], nil - } - --func (m *TopKMatrix_Vector) MarshalTo(dAtA []byte) (int, error) { -+func (m *TDigest_Centroid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) - } - --func (m *TopKMatrix_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+func (m *TDigest_Centroid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l -- if m.TimestampMs != 0 { -- i = encodeVarintSketch(dAtA, i, uint64(m.TimestampMs)) -+ if m.Weight != 0 { -+ i -= 8 -+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Weight)))) - i-- -- dAtA[i] = 0x10 -+ dAtA[i] = 0x11 - } -- if m.Topk != nil { -- { -- size, err := m.Topk.MarshalToSizedBuffer(dAtA[:i]) -- if err != nil { -- return 0, err -- } -- i -= size -- i = encodeVarintSketch(dAtA, i, uint64(size)) -- } -+ if m.Mean != 0 { -+ i -= 8 -+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Mean)))) - i-- -- dAtA[i] = 0xa -+ dAtA[i] = 0x9 - } - return len(dAtA) - i, nil - } - --func encodeVarintSketch(dAtA []byte, offset int, v uint64) int { -- offset -= sovSketch(v) -- base := offset -- for v >= 1<<7 { -- dAtA[offset] = uint8(v&0x7f | 0x80) -- v >>= 7 -- offset++ -+func (m *CountMinSketch) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err - } -- dAtA[offset] = uint8(v) -- return base -+ return dAtA[:n], nil - } --func (m *CountMinSketch) Size() (n int) { -- if m == nil { -- return 0 -- } -+ -+func (m *CountMinSketch) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *CountMinSketch) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i - var l int - _ = l -- if m.Depth != 0 { -- n += 1 + sovSketch(uint64(m.Depth)) -- } -- if m.Width != 0 { -- n += 1 + sovSketch(uint64(m.Width)) -- } - if len(m.Counters) > 0 { -- l = 0 -- for _, e := range m.Counters { -- l += sovSketch(uint64(e)) -+ dAtA4 := make([]byte, len(m.Counters)*10) -+ var j3 int -+ for _, num := range m.Counters { -+ for num >= 1<<7 { -+ dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) -+ num >>= 7 -+ j3++ -+ } -+ dAtA4[j3] = uint8(num) -+ j3++ - } -- n += 1 + sovSketch(uint64(l)) + l -+ i -= j3 -+ copy(dAtA[i:], dAtA4[:j3]) -+ i = encodeVarintSketch(dAtA, i, uint64(j3)) -+ i-- -+ dAtA[i] = 0x1a - } -- return n -+ if m.Width != 0 { -+ i = encodeVarintSketch(dAtA, i, uint64(m.Width)) -+ i-- -+ dAtA[i] = 0x10 -+ } -+ if m.Depth != 0 { -+ i = encodeVarintSketch(dAtA, i, uint64(m.Depth)) -+ i-- -+ dAtA[i] = 0x8 -+ } -+ return len(dAtA) - i, nil - } - --func (m *TopK) Size() (n int) { -- if m == nil { -- return 0 -+func (m *TopK) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err - } -+ return dAtA[:n], nil -+} -+ -+func (m *TopK) 
MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *TopK) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i - var l int - _ = l -- if m.Cms != nil { -- l = m.Cms.Size() -- n += 1 + l + sovSketch(uint64(l)) -+ if len(m.Hyperloglog) > 0 { -+ i -= len(m.Hyperloglog) -+ copy(dAtA[i:], m.Hyperloglog) -+ i = encodeVarintSketch(dAtA, i, uint64(len(m.Hyperloglog))) -+ i-- -+ dAtA[i] = 0x1a - } - if len(m.List) > 0 { -- for _, e := range m.List { -- l = e.Size() -- n += 1 + l + sovSketch(uint64(l)) -+ for iNdEx := len(m.List) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size, err := m.List[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintSketch(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x12 - } - } -- l = len(m.Hyperloglog) -- if l > 0 { -- n += 1 + l + sovSketch(uint64(l)) -+ if m.Cms != nil { -+ { -+ size, err := m.Cms.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintSketch(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0xa - } -- return n -+ return len(dAtA) - i, nil - } - --func (m *TopK_Pair) Size() (n int) { -- if m == nil { -- return 0 -+func (m *TopK_Pair) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err - } -+ return dAtA[:n], nil -+} -+ -+func (m *TopK_Pair) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *TopK_Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i - var l int - _ = l -- l = len(m.Event) -- if l > 0 { -- n += 1 + l + sovSketch(uint64(l)) -- } - if m.Count != 0 { -- n += 1 + sovSketch(uint64(m.Count)) -+ i = encodeVarintSketch(dAtA, i, uint64(m.Count)) -+ i-- -+ dAtA[i] = 0x10 - } -- return n -+ if len(m.Event) > 0 { -+ i -= len(m.Event) -+ copy(dAtA[i:], m.Event) -+ i = encodeVarintSketch(dAtA, i, uint64(len(m.Event))) -+ i-- -+ dAtA[i] = 0xa -+ } -+ return len(dAtA) - i, nil - } - --func (m *TopKMatrix) Size() (n int) { -+func (m *TopKMatrix) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err -+ } -+ return dAtA[:n], nil -+} -+ -+func (m *TopKMatrix) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *TopKMatrix) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i -+ var l int -+ _ = l -+ if len(m.Values) > 0 { -+ for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintSketch(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0xa -+ } -+ } -+ return len(dAtA) - i, nil -+} -+ -+func (m *TopKMatrix_Vector) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err -+ } -+ return dAtA[:n], nil -+} -+ -+func (m *TopKMatrix_Vector) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *TopKMatrix_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i -+ var l int -+ _ = l -+ if m.TimestampMs != 0 { 
-+ i = encodeVarintSketch(dAtA, i, uint64(m.TimestampMs)) -+ i-- -+ dAtA[i] = 0x10 -+ } -+ if m.Topk != nil { -+ { -+ size, err := m.Topk.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintSketch(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0xa -+ } -+ return len(dAtA) - i, nil -+} -+ -+func encodeVarintSketch(dAtA []byte, offset int, v uint64) int { -+ offset -= sovSketch(v) -+ base := offset -+ for v >= 1<<7 { -+ dAtA[offset] = uint8(v&0x7f | 0x80) -+ v >>= 7 -+ offset++ -+ } -+ dAtA[offset] = uint8(v) -+ return base -+} -+func (m *QuantileSketchMatrix) Size() (n int) { - if m == nil { - return 0 - } -@@ -859,45 +1785,339 @@ func (m *TopKMatrix) Size() (n int) { - return n - } - --func (m *TopKMatrix_Vector) Size() (n int) { -+func (m *QuantileSketchVector) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l -- if m.Topk != nil { -- l = m.Topk.Size() -+ if len(m.Samples) > 0 { -+ for _, e := range m.Samples { -+ l = e.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ } -+ return n -+} -+ -+func (m *QuantileSketchSample) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.F != nil { -+ l = m.F.Size() - n += 1 + l + sovSketch(uint64(l)) - } - if m.TimestampMs != 0 { - n += 1 + sovSketch(uint64(m.TimestampMs)) - } -+ if len(m.Metric) > 0 { -+ for _, e := range m.Metric { -+ l = e.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ } - return n - } - --func sovSketch(x uint64) (n int) { -- return (math_bits.Len64(x|1) + 6) / 7 --} --func sozSketch(x uint64) (n int) { -- return sovSketch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) --} --func (this *CountMinSketch) String() string { -- if this == nil { -- return ""nil"" -+func (m *QuantileSketch) Size() (n int) { -+ if m == nil { -+ return 0 - } -- s := strings.Join([]string{`&CountMinSketch{`, -- `Depth:` + fmt.Sprintf(""%v"", this.Depth) + `,`, -- `Width:` + fmt.Sprintf(""%v"", this.Width) + `,`, -- `Counters:` + fmt.Sprintf(""%v"", this.Counters) + `,`, -- `}`, -- }, """") -- return s -+ var l int -+ _ = l -+ if m.Sketch != nil { -+ n += m.Sketch.Size() -+ } -+ return n - } --func (this *TopK) String() string { -- if this == nil { -- return ""nil"" -+ -+func (m *QuantileSketch_Tdigest) Size() (n int) { -+ if m == nil { -+ return 0 - } -- repeatedStringForList := ""[]*TopK_Pair{"" -+ var l int -+ _ = l -+ if m.Tdigest != nil { -+ l = m.Tdigest.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ return n -+} -+func (m *QuantileSketch_Ddsketch) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.Ddsketch != nil { -+ l = len(m.Ddsketch) -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ return n -+} -+func (m *TDigest) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.Min != 0 { -+ n += 9 -+ } -+ if m.Max != 0 { -+ n += 9 -+ } -+ if m.Compression != 0 { -+ n += 9 -+ } -+ if len(m.Processed) > 0 { -+ for _, e := range m.Processed { -+ l = e.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ } -+ return n -+} -+ -+func (m *TDigest_Centroid) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.Mean != 0 { -+ n += 9 -+ } -+ if m.Weight != 0 { -+ n += 9 -+ } -+ return n -+} -+ -+func (m *CountMinSketch) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.Depth != 0 { -+ n += 1 + sovSketch(uint64(m.Depth)) -+ } -+ if m.Width != 0 { -+ n += 1 + sovSketch(uint64(m.Width)) -+ } -+ if len(m.Counters) > 0 { -+ l = 0 -+ for _, e := range 
m.Counters { -+ l += sovSketch(uint64(e)) -+ } -+ n += 1 + sovSketch(uint64(l)) + l -+ } -+ return n -+} -+ -+func (m *TopK) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.Cms != nil { -+ l = m.Cms.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ if len(m.List) > 0 { -+ for _, e := range m.List { -+ l = e.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ } -+ l = len(m.Hyperloglog) -+ if l > 0 { -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ return n -+} -+ -+func (m *TopK_Pair) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ l = len(m.Event) -+ if l > 0 { -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ if m.Count != 0 { -+ n += 1 + sovSketch(uint64(m.Count)) -+ } -+ return n -+} -+ -+func (m *TopKMatrix) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if len(m.Values) > 0 { -+ for _, e := range m.Values { -+ l = e.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ } -+ return n -+} -+ -+func (m *TopKMatrix_Vector) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.Topk != nil { -+ l = m.Topk.Size() -+ n += 1 + l + sovSketch(uint64(l)) -+ } -+ if m.TimestampMs != 0 { -+ n += 1 + sovSketch(uint64(m.TimestampMs)) -+ } -+ return n -+} -+ -+func sovSketch(x uint64) (n int) { -+ return (math_bits.Len64(x|1) + 6) / 7 -+} -+func sozSketch(x uint64) (n int) { -+ return sovSketch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -+} -+func (this *QuantileSketchMatrix) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ repeatedStringForValues := ""[]*QuantileSketchVector{"" -+ for _, f := range this.Values { -+ repeatedStringForValues += strings.Replace(f.String(), ""QuantileSketchVector"", ""QuantileSketchVector"", 1) + "","" -+ } -+ repeatedStringForValues += ""}"" -+ s := strings.Join([]string{`&QuantileSketchMatrix{`, -+ `Values:` + repeatedStringForValues + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *QuantileSketchVector) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ repeatedStringForSamples := ""[]*QuantileSketchSample{"" -+ for _, f := range this.Samples { -+ repeatedStringForSamples += strings.Replace(f.String(), ""QuantileSketchSample"", ""QuantileSketchSample"", 1) + "","" -+ } -+ repeatedStringForSamples += ""}"" -+ s := strings.Join([]string{`&QuantileSketchVector{`, -+ `Samples:` + repeatedStringForSamples + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *QuantileSketchSample) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ repeatedStringForMetric := ""[]*LabelPair{"" -+ for _, f := range this.Metric { -+ repeatedStringForMetric += strings.Replace(fmt.Sprintf(""%v"", f), ""LabelPair"", ""LabelPair"", 1) + "","" -+ } -+ repeatedStringForMetric += ""}"" -+ s := strings.Join([]string{`&QuantileSketchSample{`, -+ `F:` + strings.Replace(this.F.String(), ""QuantileSketch"", ""QuantileSketch"", 1) + `,`, -+ `TimestampMs:` + fmt.Sprintf(""%v"", this.TimestampMs) + `,`, -+ `Metric:` + repeatedStringForMetric + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *QuantileSketch) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&QuantileSketch{`, -+ `Sketch:` + fmt.Sprintf(""%v"", this.Sketch) + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *QuantileSketch_Tdigest) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&QuantileSketch_Tdigest{`, -+ `Tdigest:` + strings.Replace(fmt.Sprintf(""%v"", this.Tdigest), ""TDigest"", 
""TDigest"", 1) + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *QuantileSketch_Ddsketch) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&QuantileSketch_Ddsketch{`, -+ `Ddsketch:` + fmt.Sprintf(""%v"", this.Ddsketch) + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *TDigest) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ repeatedStringForProcessed := ""[]*TDigest_Centroid{"" -+ for _, f := range this.Processed { -+ repeatedStringForProcessed += strings.Replace(fmt.Sprintf(""%v"", f), ""TDigest_Centroid"", ""TDigest_Centroid"", 1) + "","" -+ } -+ repeatedStringForProcessed += ""}"" -+ s := strings.Join([]string{`&TDigest{`, -+ `Min:` + fmt.Sprintf(""%v"", this.Min) + `,`, -+ `Max:` + fmt.Sprintf(""%v"", this.Max) + `,`, -+ `Compression:` + fmt.Sprintf(""%v"", this.Compression) + `,`, -+ `Processed:` + repeatedStringForProcessed + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *TDigest_Centroid) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&TDigest_Centroid{`, -+ `Mean:` + fmt.Sprintf(""%v"", this.Mean) + `,`, -+ `Weight:` + fmt.Sprintf(""%v"", this.Weight) + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *CountMinSketch) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&CountMinSketch{`, -+ `Depth:` + fmt.Sprintf(""%v"", this.Depth) + `,`, -+ `Width:` + fmt.Sprintf(""%v"", this.Width) + `,`, -+ `Counters:` + fmt.Sprintf(""%v"", this.Counters) + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *TopK) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ repeatedStringForList := ""[]*TopK_Pair{"" - for _, f := range this.List { - repeatedStringForList += strings.Replace(fmt.Sprintf(""%v"", f), ""TopK_Pair"", ""TopK_Pair"", 1) + "","" - } -@@ -955,6 +2175,638 @@ func valueToStringSketch(v interface{}) string { - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf(""*%v"", pv) - } -+func (m *QuantileSketchMatrix) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: QuantileSketchMatrix: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: QuantileSketchMatrix: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Values"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthSketch -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Values = append(m.Values, &QuantileSketchVector{}) -+ if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ 
skippy, err := skipSketch(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *QuantileSketchVector) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: QuantileSketchVector: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: QuantileSketchVector: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Samples"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthSketch -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Samples = append(m.Samples, &QuantileSketchSample{}) -+ if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipSketch(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *QuantileSketchSample) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: QuantileSketchSample: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: QuantileSketchSample: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field F"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthSketch -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthSketch -+ } -+ 
if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ if m.F == nil { -+ m.F = &QuantileSketch{} -+ } -+ if err := m.F.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ case 2: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field TimestampMs"", wireType) -+ } -+ m.TimestampMs = 0 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ m.TimestampMs |= int64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ case 3: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Metric"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthSketch -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Metric = append(m.Metric, &LabelPair{}) -+ if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipSketch(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *QuantileSketch) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: QuantileSketch: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: QuantileSketch: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Tdigest"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthSketch -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ v := &TDigest{} -+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ m.Sketch = &QuantileSketch_Tdigest{v} -+ iNdEx = postIndex -+ case 2: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Ddsketch"", wireType) -+ } -+ var byteLen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := 
dAtA[iNdEx] -+ iNdEx++ -+ byteLen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if byteLen < 0 { -+ return ErrInvalidLengthSketch -+ } -+ postIndex := iNdEx + byteLen -+ if postIndex < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ v := make([]byte, postIndex-iNdEx) -+ copy(v, dAtA[iNdEx:postIndex]) -+ m.Sketch = &QuantileSketch_Ddsketch{v} -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipSketch(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *TDigest) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: TDigest: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: TDigest: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 1 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Min"", wireType) -+ } -+ var v uint64 -+ if (iNdEx + 8) > l { -+ return io.ErrUnexpectedEOF -+ } -+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) -+ iNdEx += 8 -+ m.Min = float64(math.Float64frombits(v)) -+ case 2: -+ if wireType != 1 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Max"", wireType) -+ } -+ var v uint64 -+ if (iNdEx + 8) > l { -+ return io.ErrUnexpectedEOF -+ } -+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) -+ iNdEx += 8 -+ m.Max = float64(math.Float64frombits(v)) -+ case 3: -+ if wireType != 1 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Compression"", wireType) -+ } -+ var v uint64 -+ if (iNdEx + 8) > l { -+ return io.ErrUnexpectedEOF -+ } -+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) -+ iNdEx += 8 -+ m.Compression = float64(math.Float64frombits(v)) -+ case 4: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Processed"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthSketch -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Processed = append(m.Processed, &TDigest_Centroid{}) -+ if err := m.Processed[len(m.Processed)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipSketch(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) > l { -+ return 
io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *TDigest_Centroid) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowSketch -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: Centroid: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: Centroid: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 1 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Mean"", wireType) -+ } -+ var v uint64 -+ if (iNdEx + 8) > l { -+ return io.ErrUnexpectedEOF -+ } -+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) -+ iNdEx += 8 -+ m.Mean = float64(math.Float64frombits(v)) -+ case 2: -+ if wireType != 1 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Weight"", wireType) -+ } -+ var v uint64 -+ if (iNdEx + 8) > l { -+ return io.ErrUnexpectedEOF -+ } -+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) -+ iNdEx += 8 -+ m.Weight = float64(math.Float64frombits(v)) -+ default: -+ iNdEx = preIndex -+ skippy, err := skipSketch(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthSketch -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} - func (m *CountMinSketch) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 -diff --git a/pkg/logproto/sketch.proto b/pkg/logproto/sketch.proto -index cba0731dcad9b..e84deaf20d4c8 100644 ---- a/pkg/logproto/sketch.proto -+++ b/pkg/logproto/sketch.proto -@@ -2,8 +2,45 @@ syntax = ""proto3""; - - package logproto; - -+import ""gogoproto/gogo.proto""; -+import ""pkg/logproto/logproto.proto""; -+ - option go_package = ""github.com/grafana/loki/pkg/logproto""; - -+message QuantileSketchMatrix { -+ repeated QuantileSketchVector values = 1; -+} -+ -+message QuantileSketchVector { -+ repeated QuantileSketchSample samples = 1; -+} -+ -+message QuantileSketchSample { -+ QuantileSketch f = 1; -+ int64 timestamp_ms = 2; -+ repeated LabelPair metric = 3; -+} -+ -+message QuantileSketch { -+ oneof sketch { -+ TDigest tdigest = 1; -+ bytes ddsketch = 2; // Use binary encoding for DDSketch. 
-+ } -+} -+ -+// ""Large"" bytes format from https://github.com/tdunning/t-digest -+message TDigest { -+ double min = 1; -+ double max = 2; -+ double compression = 3; -+ -+ message Centroid { -+ double mean = 1; -+ double weight = 2; -+ } -+ repeated Centroid processed = 4; -+} -+ - message CountMinSketch { - uint32 depth = 1; - uint32 width = 2; -diff --git a/pkg/logql/range_vector.go b/pkg/logql/range_vector.go -index ecfb7254b96f9..484949718f090 100644 ---- a/pkg/logql/range_vector.go -+++ b/pkg/logql/range_vector.go -@@ -452,17 +452,17 @@ func quantileOverTime(q float64) func(samples []promql.FPoint) float64 { - for _, v := range samples { - values = append(values, promql.Sample{F: v.F}) - } -- return quantile(q, values) -+ return Quantile(q, values) - } - } - --// quantile calculates the given quantile of a vector of samples. -+// Quantile calculates the given Quantile of a vector of samples. - // - // The Vector will be sorted. - // If 'values' has zero elements, NaN is returned. - // If q<0, -Inf is returned. - // If q>1, +Inf is returned. --func quantile(q float64, values vector.HeapByMaxValue) float64 { -+func Quantile(q float64, values vector.HeapByMaxValue) float64 { - if len(values) == 0 { - return math.NaN() - } -@@ -812,7 +812,7 @@ func (a *QuantileOverTime) agg(sample promql.FPoint) { - } - - func (a *QuantileOverTime) at() float64 { -- return quantile(a.q, a.values) -+ return Quantile(a.q, a.values) - } - - type FirstOverTime struct { -diff --git a/pkg/logql/sketch/quantile.go b/pkg/logql/sketch/quantile.go -new file mode 100644 -index 0000000000000..14b44e69f51c7 ---- /dev/null -+++ b/pkg/logql/sketch/quantile.go -@@ -0,0 +1,236 @@ -+package sketch -+ -+import ( -+ ""errors"" -+ ""fmt"" -+ -+ ""github.com/DataDog/sketches-go/ddsketch"" -+ ""github.com/influxdata/tdigest"" -+ ""github.com/prometheus/prometheus/model/labels"" -+ promql_parser ""github.com/prometheus/prometheus/promql/parser"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" -+) -+ -+// QuantileSketchVector represents multiple qunatile sketches at the same point in -+// time. -+type QuantileSketchVector []quantileSketchSample -+ -+// QuantileSketchMatrix contains multiples QuantileSketchVectors across many -+// points in time. -+type QuantileSketchMatrix []QuantileSketchVector -+ -+// ToProto converts a quantile sketch vector to its protobuf definition. 
-+func (q QuantileSketchVector) ToProto() *logproto.QuantileSketchVector { -+ samples := make([]*logproto.QuantileSketchSample, len(q)) -+ for i, sample := range q { -+ samples[i] = sample.ToProto() -+ } -+ return &logproto.QuantileSketchVector{Samples: samples} -+} -+ -+func QuantileSketchVectorFromProto(proto *logproto.QuantileSketchVector) (QuantileSketchVector, error) { -+ out := make([]quantileSketchSample, len(proto.Samples)) -+ var err error -+ for i, s := range proto.Samples { -+ out[i], err = quantileSketchSampleFromProto(s) -+ if err != nil { -+ return nil, err -+ } -+ } -+ return out, nil -+} -+ -+func (QuantileSketchMatrix) String() string { -+ return ""QuantileSketchMatrix()"" -+} -+ -+func (QuantileSketchMatrix) Type() promql_parser.ValueType { return ""QuantileSketchMatrix"" } -+ -+func (m QuantileSketchMatrix) ToProto() *logproto.QuantileSketchMatrix { -+ values := make([]*logproto.QuantileSketchVector, len(m)) -+ for i, vec := range m { -+ values[i] = vec.ToProto() -+ } -+ return &logproto.QuantileSketchMatrix{Values: values} -+} -+ -+func QuantileSketchMatrixFromProto(proto *logproto.QuantileSketchMatrix) (QuantileSketchMatrix, error) { -+ out := make([]QuantileSketchVector, len(proto.Values)) -+ var err error -+ for i, v := range proto.Values { -+ out[i], err = QuantileSketchVectorFromProto(v) -+ if err != nil { -+ return nil, err -+ } -+ } -+ return out, nil -+} -+ -+type quantileSketchSample struct { -+ T int64 -+ F QuantileSketch -+ -+ Metric labels.Labels -+} -+ -+func (q quantileSketchSample) ToProto() *logproto.QuantileSketchSample { -+ metric := make([]*logproto.LabelPair, len(q.Metric)) -+ for i, m := range q.Metric { -+ metric[i] = &logproto.LabelPair{Name: m.Name, Value: m.Value} -+ } -+ -+ sketch := q.F.ToProto() -+ -+ return &logproto.QuantileSketchSample{ -+ F: sketch, -+ TimestampMs: q.T, -+ Metric: metric, -+ } -+} -+ -+func quantileSketchSampleFromProto(proto *logproto.QuantileSketchSample) (quantileSketchSample, error) { -+ sketch, err := QuantileSketchFromProto(proto.F) -+ if err != nil { -+ return quantileSketchSample{}, err -+ } -+ out := quantileSketchSample{ -+ T: proto.TimestampMs, -+ F: sketch, -+ Metric: make(labels.Labels, len(proto.Metric)), -+ } -+ -+ for i, p := range proto.Metric { -+ out.Metric[i] = labels.Label{Name: p.Name, Value: p.Value} -+ } -+ -+ return out, nil -+} -+ -+// QuantileSketch estimates quantiles over time. -+type QuantileSketch interface { -+ Add(float64) error -+ Quantile(float64) (float64, error) -+ Merge(QuantileSketch) (QuantileSketch, error) -+ ToProto() *logproto.QuantileSketch -+} -+ -+type QuantileSketchFactory func() QuantileSketch -+ -+func QuantileSketchFromProto(proto *logproto.QuantileSketch) (QuantileSketch, error) { -+ switch concrete := proto.Sketch.(type) { -+ case *logproto.QuantileSketch_Tdigest: -+ return TDigestQuantileFromProto(concrete.Tdigest), nil -+ case *logproto.QuantileSketch_Ddsketch: -+ return DDSketchQuantileFromProto(concrete.Ddsketch) -+ } -+ -+ return nil, fmt.Errorf(""unknown quantile sketch type: %T"", proto.Sketch) -+} -+ -+// DDSketchQuantile is a QuantileSketch implementation based on DataDog's -+// ""DDSketch: A fast and fully-mergeable quantile sketch with relative-error -+// guarantees."" paper. 
-+type DDSketchQuantile struct { -+ *ddsketch.DDSketch -+} -+ -+func NewDDSketch() *DDSketchQuantile { -+ s, _ := ddsketch.NewDefaultDDSketch(0.01) -+ return &DDSketchQuantile{s} -+} -+ -+func (d *DDSketchQuantile) Quantile(quantile float64) (float64, error) { -+ if quantile >= 1.0 || quantile <= 0 { -+ return 0.0, errors.New(""invalid quantile value, must be between 0.0 and 1.0 "") -+ } -+ return d.GetValueAtQuantile(quantile) -+} -+ -+func (d *DDSketchQuantile) Merge(other QuantileSketch) (QuantileSketch, error) { -+ cast, ok := other.(*DDSketchQuantile) -+ if !ok { -+ return nil, fmt.Errorf(""invalid sketch type: want %T, got %T"", d, cast) -+ } -+ -+ err := d.MergeWith(cast.DDSketch) -+ return d, err -+} -+ -+func (d *DDSketchQuantile) ToProto() *logproto.QuantileSketch { -+ sketch := &logproto.QuantileSketch_Ddsketch{} -+ d.DDSketch.Encode(&sketch.Ddsketch, false) -+ return &logproto.QuantileSketch{ -+ Sketch: sketch, -+ } -+} -+ -+func DDSketchQuantileFromProto(buf []byte) (*DDSketchQuantile, error) { -+ sketch := NewDDSketch() -+ err := sketch.DDSketch.DecodeAndMergeWith(buf) -+ return sketch, err -+} -+ -+type TDigestQuantile struct { -+ *tdigest.TDigest -+} -+ -+func NewTDigestSketch() QuantileSketch { -+ s := tdigest.New() -+ -+ return &TDigestQuantile{s} -+} -+ -+func (d *TDigestQuantile) Add(count float64) error { -+ d.TDigest.Add(count, 1) -+ return nil -+} -+ -+func (d *TDigestQuantile) Quantile(quantile float64) (float64, error) { -+ if quantile >= 1.0 || quantile <= 0 { -+ return 0.0, errors.New(""invalid quantile value, must be between 0.0 and 1.0 "") -+ } -+ return d.TDigest.Quantile(quantile), nil -+} -+ -+func (d *TDigestQuantile) Merge(other QuantileSketch) (QuantileSketch, error) { -+ cast, ok := other.(*TDigestQuantile) -+ if !ok { -+ return nil, fmt.Errorf(""invalid sketch type: want %T, got %T"", d, cast) -+ } -+ -+ d.TDigest.Merge(cast.TDigest) -+ return d, nil -+} -+ -+func (d *TDigestQuantile) ToProto() *logproto.QuantileSketch { -+ centroids := make(tdigest.CentroidList, 0) -+ centroids = d.Centroids(centroids) -+ processed := make([]*logproto.TDigest_Centroid, len(centroids)) -+ for i, c := range centroids { -+ processed[i] = &logproto.TDigest_Centroid{ -+ Mean: c.Mean, -+ Weight: c.Weight, -+ } -+ } -+ -+ return &logproto.QuantileSketch{ -+ Sketch: &logproto.QuantileSketch_Tdigest{ -+ Tdigest: &logproto.TDigest{ -+ Compression: d.Compression, -+ Processed: processed, -+ }, -+ }, -+ } -+} -+ -+func TDigestQuantileFromProto(proto *logproto.TDigest) *TDigestQuantile { -+ q := &TDigestQuantile{tdigest.NewWithCompression(proto.Compression)} -+ -+ centroids := make([]tdigest.Centroid, len(proto.Processed)) -+ for i, c := range proto.Processed { -+ centroids[i] = tdigest.Centroid{Mean: c.Mean, Weight: c.Weight} -+ } -+ q.AddCentroidList(centroids) -+ return q -+} -diff --git a/pkg/logql/sketch/quantile_test.go b/pkg/logql/sketch/quantile_test.go -new file mode 100644 -index 0000000000000..3b2f34c0e87c8 ---- /dev/null -+++ b/pkg/logql/sketch/quantile_test.go -@@ -0,0 +1,81 @@ -+package sketch -+ -+import ( -+ ""fmt"" -+ ""math/rand"" -+ ""sort"" -+ ""testing"" -+ -+ ""github.com/gogo/protobuf/proto"" -+ ""github.com/prometheus/prometheus/promql"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/logql"" -+ ""github.com/grafana/loki/pkg/logql/vector"" -+) -+ -+func TestQuantiles(t *testing.T) { -+ // v controls the distribution of values along the curve, a greater v -+ // value means there's a large distance between generated values -+ vs 
:= []float64{1.0, 5.0, 10.0} -+ // s controls the exponential curve of the distribution -+ // the higher the s values the faster the drop off from max value to lesser values -+ // s must be > 1.0 -+ ss := []float64{1.01, 2.0, 3.0, 4.0} -+ -+ // T-Digest is too big for 1_000 samples. However, we did not optimize -+ // the format for size. -+ nSamples := []int{5_000, 10_000, 100_000, 1_000_000} -+ -+ factories := []struct { -+ newSketch QuantileSketchFactory -+ name string -+ relativeError float64 -+ }{ -+ {newSketch: func() QuantileSketch { return NewDDSketch() }, name: ""DDSketch"", relativeError: 0.02}, -+ {newSketch: NewTDigestSketch, name: ""T-Digest"", relativeError: 0.05}, -+ } -+ -+ for _, tc := range factories { -+ for _, samplesCount := range nSamples { -+ for _, s := range ss { -+ for _, v := range vs { -+ t.Run(fmt.Sprintf(""sketch=%s, s=%.2f, v=%.2f, events=%d"", tc.name, s, v, samplesCount), func(t *testing.T) { -+ sketch := tc.newSketch() -+ -+ r := rand.New(rand.NewSource(42)) -+ z := rand.NewZipf(r, s, v, 1_000) -+ values := make(vector.HeapByMaxValue, 0) -+ for i := 0; i < samplesCount; i++ { -+ -+ value := float64(z.Uint64()) -+ values = append(values, promql.Sample{F: value}) -+ err := sketch.Add(value) -+ require.NoError(t, err) -+ } -+ sort.Sort(values) -+ -+ // Size -+ var buf []byte -+ var err error -+ switch s := sketch.(type) { -+ case *DDSketchQuantile: -+ buf, err = proto.Marshal(s.DDSketch.ToProto()) -+ require.NoError(t, err) -+ case *TDigestQuantile: -+ buf, err = proto.Marshal(s.ToProto()) -+ require.NoError(t, err) -+ } -+ require.Less(t, len(buf), samplesCount*8) -+ -+ // Accuracy -+ expected := logql.Quantile(0.99, values) -+ actual, err := sketch.Quantile(0.99) -+ require.NoError(t, err) -+ require.InEpsilonf(t, expected, actual, tc.relativeError, ""expected quantile %f, actual quantile %f"", expected, actual) -+ }) -+ } -+ } -+ } -+ } -+} -diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go -index 7fc9149cb086a..65e9d84b41d14 100644 ---- a/pkg/querier/queryrange/codec.go -+++ b/pkg/querier/queryrange/codec.go -@@ -681,6 +681,8 @@ func decodeResponseProtobuf(r *http.Response, req queryrangebase.Request) (query - return concrete.Streams.WithHeaders(headers), nil - case *QueryResponse_TopkSketches: - return concrete.TopkSketches.WithHeaders(headers), nil -+ case *QueryResponse_QuantileSketches: -+ return concrete.QuantileSketches.WithHeaders(headers), nil - default: - return nil, httpgrpc.Errorf(http.StatusInternalServerError, ""unsupported response type, got (%t)"", resp.Response) - } -@@ -797,6 +799,8 @@ func encodeResponseProtobuf(ctx context.Context, res queryrangebase.Response) (* - p.Response = &QueryResponse_Stats{response} - case *TopKSketchesResponse: - p.Response = &QueryResponse_TopkSketches{response} -+ case *QuantileSketchResponse: -+ p.Response = &QueryResponse_QuantileSketches{response} - default: - return nil, httpgrpc.Errorf(http.StatusInternalServerError, fmt.Sprintf(""invalid response format, got (%T)"", res)) - } -diff --git a/pkg/querier/queryrange/downstreamer.go b/pkg/querier/queryrange/downstreamer.go -index f9e6ccdd32c5e..a98998f4ee793 100644 ---- a/pkg/querier/queryrange/downstreamer.go -+++ b/pkg/querier/queryrange/downstreamer.go -@@ -258,6 +258,15 @@ func ResponseToResult(resp queryrangebase.Response) (logqlmodel.Result, error) { - return logqlmodel.Result{}, fmt.Errorf(""cannot decode topk sketch: %w"", err) - } - -+ return logqlmodel.Result{ -+ Data: matrix, -+ Headers: resp.GetHeaders(), -+ }, nil -+ 
case *QuantileSketchResponse: -+ matrix, err := sketch.QuantileSketchMatrixFromProto(r.Response) -+ if err != nil { -+ return logqlmodel.Result{}, fmt.Errorf(""cannot decode quantile sketch: %w"", err) -+ } - return logqlmodel.Result{ - Data: matrix, - Headers: resp.GetHeaders(), -diff --git a/pkg/querier/queryrange/extensions.go b/pkg/querier/queryrange/extensions.go -index f773c694f425e..b19cb55f29698 100644 ---- a/pkg/querier/queryrange/extensions.go -+++ b/pkg/querier/queryrange/extensions.go -@@ -110,3 +110,16 @@ func (m *TopKSketchesResponse) WithHeaders(h []queryrangebase.PrometheusResponse - m.Headers = h - return m - } -+ -+// GetHeaders returns the HTTP headers in the response. -+func (m *QuantileSketchResponse) GetHeaders() []*queryrangebase.PrometheusResponseHeader { -+ if m != nil { -+ return convertPrometheusResponseHeadersToPointers(m.Headers) -+ } -+ return nil -+} -+ -+func (m *QuantileSketchResponse) WithHeaders(h []queryrangebase.PrometheusResponseHeader) queryrangebase.Response { -+ m.Headers = h -+ return m -+} -diff --git a/pkg/querier/queryrange/marshal.go b/pkg/querier/queryrange/marshal.go -index 92e23605cca61..7cb8f74094595 100644 ---- a/pkg/querier/queryrange/marshal.go -+++ b/pkg/querier/queryrange/marshal.go -@@ -231,6 +231,12 @@ func ResultToResponse(result logqlmodel.Result, params *logql.LiteralParams) (*Q - TopkSketches: &TopKSketchesResponse{Response: sk}, - }, - }, nil -+ case sketch.QuantileSketchMatrix: -+ return &QueryResponse{ -+ Response: &QueryResponse_QuantileSketches{ -+ QuantileSketches: &QuantileSketchResponse{Response: data.ToProto()}, -+ }, -+ }, nil - } - - return nil, fmt.Errorf(""unsupported data type: %t"", result.Data) -diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go -index 954b80d29c516..81728a772c703 100644 ---- a/pkg/querier/queryrange/queryrange.pb.go -+++ b/pkg/querier/queryrange/queryrange.pb.go -@@ -882,6 +882,43 @@ func (m *TopKSketchesResponse) XXX_DiscardUnknown() { - - var xxx_messageInfo_TopKSketchesResponse proto.InternalMessageInfo - -+type QuantileSketchResponse struct { -+ Response *github_com_grafana_loki_pkg_logproto.QuantileSketchMatrix `protobuf:""bytes,1,opt,name=response,proto3,customtype=github.com/grafana/loki/pkg/logproto.QuantileSketchMatrix"" json:""response,omitempty""` -+ Headers []github_com_grafana_loki_pkg_querier_queryrange_queryrangebase_definitions.PrometheusResponseHeader `protobuf:""bytes,2,rep,name=Headers,proto3,customtype=github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions.PrometheusResponseHeader"" json:""-""` -+} -+ -+func (m *QuantileSketchResponse) Reset() { *m = QuantileSketchResponse{} } -+func (*QuantileSketchResponse) ProtoMessage() {} -+func (*QuantileSketchResponse) Descriptor() ([]byte, []int) { -+ return fileDescriptor_51b9d53b40d11902, []int{13} -+} -+func (m *QuantileSketchResponse) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *QuantileSketchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_QuantileSketchResponse.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *QuantileSketchResponse) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_QuantileSketchResponse.Merge(m, src) -+} -+func (m *QuantileSketchResponse) XXX_Size() int { -+ return m.Size() -+} -+func (m *QuantileSketchResponse) 
XXX_DiscardUnknown() { -+ xxx_messageInfo_QuantileSketchResponse.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_QuantileSketchResponse proto.InternalMessageInfo -+ - type QueryResponse struct { - // Types that are valid to be assigned to Response: - // *QueryResponse_Series -@@ -891,13 +928,14 @@ type QueryResponse struct { - // *QueryResponse_Streams - // *QueryResponse_Volume - // *QueryResponse_TopkSketches -+ // *QueryResponse_QuantileSketches - Response isQueryResponse_Response `protobuf_oneof:""response""` - } - - func (m *QueryResponse) Reset() { *m = QueryResponse{} } - func (*QueryResponse) ProtoMessage() {} - func (*QueryResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_51b9d53b40d11902, []int{13} -+ return fileDescriptor_51b9d53b40d11902, []int{14} - } - func (m *QueryResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -@@ -954,14 +992,18 @@ type QueryResponse_Volume struct { - type QueryResponse_TopkSketches struct { - TopkSketches *TopKSketchesResponse `protobuf:""bytes,7,opt,name=topkSketches,proto3,oneof""` - } -+type QueryResponse_QuantileSketches struct { -+ QuantileSketches *QuantileSketchResponse `protobuf:""bytes,8,opt,name=quantileSketches,proto3,oneof""` -+} - --func (*QueryResponse_Series) isQueryResponse_Response() {} --func (*QueryResponse_Labels) isQueryResponse_Response() {} --func (*QueryResponse_Stats) isQueryResponse_Response() {} --func (*QueryResponse_Prom) isQueryResponse_Response() {} --func (*QueryResponse_Streams) isQueryResponse_Response() {} --func (*QueryResponse_Volume) isQueryResponse_Response() {} --func (*QueryResponse_TopkSketches) isQueryResponse_Response() {} -+func (*QueryResponse_Series) isQueryResponse_Response() {} -+func (*QueryResponse_Labels) isQueryResponse_Response() {} -+func (*QueryResponse_Stats) isQueryResponse_Response() {} -+func (*QueryResponse_Prom) isQueryResponse_Response() {} -+func (*QueryResponse_Streams) isQueryResponse_Response() {} -+func (*QueryResponse_Volume) isQueryResponse_Response() {} -+func (*QueryResponse_TopkSketches) isQueryResponse_Response() {} -+func (*QueryResponse_QuantileSketches) isQueryResponse_Response() {} - - func (m *QueryResponse) GetResponse() isQueryResponse_Response { - if m != nil { -@@ -1019,6 +1061,13 @@ func (m *QueryResponse) GetTopkSketches() *TopKSketchesResponse { - return nil - } - -+func (m *QueryResponse) GetQuantileSketches() *QuantileSketchResponse { -+ if x, ok := m.GetResponse().(*QueryResponse_QuantileSketches); ok { -+ return x.QuantileSketches -+ } -+ return nil -+} -+ - // XXX_OneofWrappers is for the internal use of the proto package. 
- func (*QueryResponse) XXX_OneofWrappers() []interface{} { - return []interface{}{ -@@ -1029,6 +1078,7 @@ func (*QueryResponse) XXX_OneofWrappers() []interface{} { - (*QueryResponse_Streams)(nil), - (*QueryResponse_Volume)(nil), - (*QueryResponse_TopkSketches)(nil), -+ (*QueryResponse_QuantileSketches)(nil), - } - } - -@@ -1046,6 +1096,7 @@ func init() { - proto.RegisterType((*VolumeRequest)(nil), ""queryrange.VolumeRequest"") - proto.RegisterType((*VolumeResponse)(nil), ""queryrange.VolumeResponse"") - proto.RegisterType((*TopKSketchesResponse)(nil), ""queryrange.TopKSketchesResponse"") -+ proto.RegisterType((*QuantileSketchResponse)(nil), ""queryrange.QuantileSketchResponse"") - proto.RegisterType((*QueryResponse)(nil), ""queryrange.QueryResponse"") - } - -@@ -1054,90 +1105,93 @@ func init() { - } - - var fileDescriptor_51b9d53b40d11902 = []byte{ -- // 1316 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcd, 0x6f, 0x1b, 0x45, -- 0x14, 0xf7, 0xac, 0x3f, 0x12, 0x4f, 0x9a, 0x00, 0x93, 0xd0, 0x2e, 0xa1, 0xda, 0xb5, 0x2c, 0x41, -- 0x8d, 0x04, 0xbb, 0x22, 0x29, 0x2d, 0x5f, 0x42, 0x74, 0x5b, 0xaa, 0x44, 0x14, 0x04, 0xdb, 0x88, -- 0xfb, 0x24, 0x9e, 0xd8, 0x4b, 0xbc, 0x1f, 0x9d, 0x19, 0x57, 0xed, 0x8d, 0x2b, 0x12, 0x48, 0xfd, -- 0x2b, 0x10, 0x12, 0x15, 0x17, 0x6e, 0x48, 0xdc, 0x7b, 0xcc, 0xb1, 0x8a, 0xc4, 0x42, 0xdd, 0x0b, -- 0xe4, 0xd4, 0x3f, 0x01, 0xcd, 0xcc, 0xee, 0x7a, 0xd6, 0x71, 0xdb, 0x38, 0xbd, 0xb4, 0x12, 0x17, -- 0x7b, 0x3e, 0xde, 0x6f, 0x76, 0xde, 0xef, 0xfd, 0xde, 0x9b, 0x19, 0x78, 0x2e, 0xd9, 0xeb, 0xb9, -- 0x37, 0x86, 0x84, 0x06, 0x84, 0xca, 0xff, 0xdb, 0x14, 0x47, 0x3d, 0xa2, 0x35, 0x9d, 0x84, 0xc6, -- 0x3c, 0x46, 0x70, 0x3c, 0xb2, 0xba, 0xd2, 0x8b, 0x7b, 0xb1, 0x1c, 0x76, 0x45, 0x4b, 0x59, 0xac, -- 0xda, 0xbd, 0x38, 0xee, 0x0d, 0x88, 0x2b, 0x7b, 0xdb, 0xc3, 0x5d, 0x97, 0x07, 0x21, 0x61, 0x1c, -- 0x87, 0x49, 0x66, 0xf0, 0xba, 0xf8, 0xd6, 0x20, 0xee, 0x29, 0x64, 0xde, 0xc8, 0x26, 0x5f, 0x2b, -- 0x4d, 0xb2, 0x3d, 0xc2, 0x77, 0xfa, 0xd9, 0x54, 0x2b, 0x9b, 0xba, 0x31, 0x08, 0xe3, 0x2e, 0x19, -- 0xb8, 0x8c, 0x63, 0xce, 0xd4, 0x6f, 0x66, 0xb1, 0x2c, 0x2c, 0x92, 0x21, 0xeb, 0xcb, 0x9f, 0x6c, -- 0xf0, 0xf2, 0x53, 0x5d, 0xdb, 0xc6, 0x8c, 0xb8, 0x5d, 0xb2, 0x1b, 0x44, 0x01, 0x0f, 0xe2, 0x88, -- 0xe9, 0xed, 0x6c, 0x91, 0x0b, 0xc7, 0x5b, 0x64, 0x92, 0xae, 0xf6, 0xbe, 0x01, 0x17, 0xae, 0xc5, -- 0x7b, 0x81, 0x4f, 0x6e, 0x0c, 0x09, 0xe3, 0x68, 0x05, 0xd6, 0xa5, 0x8d, 0x09, 0x5a, 0xa0, 0xd3, -- 0xf4, 0x55, 0x47, 0x8c, 0x0e, 0x82, 0x30, 0xe0, 0xa6, 0xd1, 0x02, 0x9d, 0x45, 0x5f, 0x75, 0x10, -- 0x82, 0x35, 0xc6, 0x49, 0x62, 0x56, 0x5b, 0xa0, 0x53, 0xf5, 0x65, 0x1b, 0xad, 0xc2, 0xf9, 0x20, -- 0xe2, 0x84, 0xde, 0xc4, 0x03, 0xb3, 0x29, 0xc7, 0x8b, 0x3e, 0xfa, 0x04, 0xce, 0x31, 0x8e, 0x29, -- 0xdf, 0x62, 0x66, 0xad, 0x05, 0x3a, 0x0b, 0x6b, 0xab, 0x8e, 0x0a, 0x85, 0x93, 0x87, 0xc2, 0xd9, -- 0xca, 0x43, 0xe1, 0xcd, 0xdf, 0x4b, 0xed, 0xca, 0x9d, 0xbf, 0x6c, 0xe0, 0xe7, 0x20, 0xf4, 0x21, -- 0xac, 0x93, 0xa8, 0xbb, 0xc5, 0xcc, 0xfa, 0x0c, 0x68, 0x05, 0x41, 0xef, 0xc2, 0x66, 0x37, 0xa0, -- 0x64, 0x47, 0x70, 0x66, 0x36, 0x5a, 0xa0, 0xb3, 0xb4, 0xb6, 0xec, 0x14, 0xa1, 0xbd, 0x92, 0x4f, -- 0xf9, 0x63, 0x2b, 0xe1, 0x5e, 0x82, 0x79, 0xdf, 0x9c, 0x93, 0x4c, 0xc8, 0x36, 0x6a, 0xc3, 0x06, -- 0xeb, 0x63, 0xda, 0x65, 0xe6, 0x7c, 0xab, 0xda, 0x69, 0x7a, 0xf0, 0x30, 0xb5, 0xb3, 0x11, 0x3f, -- 0xfb, 0x6f, 0xff, 0x0b, 0x20, 0x12, 0x94, 0x6e, 0x46, 0x8c, 0xe3, 0x88, 0x9f, 0x84, 0xd9, 0x8f, -- 0x61, 0x43, 0x88, 0x72, 0x8b, 0x49, 0x6e, 0x8f, 0xeb, 0x6a, 0x86, 
0x29, 0xfb, 0x5a, 0x9b, 0xc9, -- 0xd7, 0xfa, 0x54, 0x5f, 0x1b, 0x8f, 0xf5, 0xf5, 0x97, 0x1a, 0x3c, 0xa5, 0xe4, 0xc3, 0x92, 0x38, -- 0x62, 0x44, 0x80, 0xae, 0x73, 0xcc, 0x87, 0x4c, 0xb9, 0x99, 0x81, 0xe4, 0x88, 0x9f, 0xcd, 0xa0, -- 0x4f, 0x61, 0xed, 0x0a, 0xe6, 0x58, 0xba, 0xbc, 0xb0, 0xb6, 0xe2, 0x68, 0xa2, 0x14, 0x6b, 0x89, -- 0x39, 0xef, 0xb4, 0xf0, 0xea, 0x30, 0xb5, 0x97, 0xba, 0x98, 0xe3, 0xb7, 0xe3, 0x30, 0xe0, 0x24, -- 0x4c, 0xf8, 0x6d, 0x5f, 0x22, 0xd1, 0x7b, 0xb0, 0xf9, 0x19, 0xa5, 0x31, 0xdd, 0xba, 0x9d, 0x10, -- 0x49, 0x51, 0xd3, 0x3b, 0x73, 0x98, 0xda, 0xcb, 0x24, 0x1f, 0xd4, 0x10, 0x63, 0x4b, 0xf4, 0x16, -- 0xac, 0xcb, 0x8e, 0x24, 0xa5, 0xe9, 0x2d, 0x1f, 0xa6, 0xf6, 0x4b, 0x12, 0xa2, 0x99, 0x2b, 0x8b, -- 0x32, 0x87, 0xf5, 0x63, 0x71, 0x58, 0x84, 0xb2, 0xa1, 0x87, 0xd2, 0x84, 0x73, 0x37, 0x09, 0x65, -- 0x62, 0x99, 0x39, 0x39, 0x9e, 0x77, 0xd1, 0x25, 0x08, 0x05, 0x31, 0x01, 0xe3, 0xc1, 0x8e, 0xd0, -- 0x93, 0x20, 0x63, 0xd1, 0x51, 0xe5, 0xc2, 0x27, 0x6c, 0x38, 0xe0, 0x1e, 0xca, 0x58, 0xd0, 0x0c, -- 0x7d, 0xad, 0x8d, 0xee, 0x02, 0x38, 0xb7, 0x41, 0x70, 0x97, 0x50, 0x66, 0x36, 0x5b, 0xd5, 0xce, -- 0xc2, 0xda, 0x1b, 0x8e, 0x5e, 0x1b, 0xbe, 0xa2, 0x71, 0x48, 0x78, 0x9f, 0x0c, 0x59, 0x1e, 0x20, -- 0x65, 0xed, 0xed, 0x1d, 0xa4, 0xf6, 0x76, 0x2f, 0xe0, 0xfd, 0xe1, 0xb6, 0xb3, 0x13, 0x87, 0x6e, -- 0x8f, 0xe2, 0x5d, 0x1c, 0x61, 0x77, 0x10, 0xef, 0x05, 0xee, 0xcc, 0xf5, 0xe8, 0xb1, 0xdf, 0x39, -- 0x4c, 0x6d, 0xf0, 0x8e, 0x9f, 0x6f, 0xb1, 0xfd, 0x27, 0x80, 0xaf, 0x88, 0x08, 0x5f, 0x17, 0x6b, -- 0x33, 0x2d, 0x31, 0x42, 0xcc, 0x77, 0xfa, 0x26, 0x10, 0x32, 0xf3, 0x55, 0x47, 0x2f, 0x16, 0xc6, -- 0x33, 0x15, 0x8b, 0xea, 0xec, 0xc5, 0x22, 0xcf, 0x86, 0xda, 0xd4, 0x6c, 0xa8, 0x3f, 0x36, 0x1b, -- 0x7e, 0xa8, 0xaa, 0xcc, 0xcf, 0xfd, 0x9b, 0x21, 0x27, 0xae, 0x16, 0x39, 0x51, 0x95, 0xbb, 0x2d, -- 0xa4, 0xa6, 0xd6, 0xda, 0xec, 0x92, 0x88, 0x07, 0xbb, 0x01, 0xa1, 0x4f, 0xc9, 0x0c, 0x4d, 0x6e, -- 0xd5, 0xb2, 0xdc, 0x74, 0xad, 0xd4, 0x9e, 0x7b, 0xad, 0x4c, 0x64, 0x47, 0xfd, 0x04, 0xd9, 0xd1, -- 0xfe, 0x03, 0xc0, 0x57, 0x45, 0x38, 0xae, 0xe1, 0x6d, 0x32, 0xf8, 0x12, 0x87, 0x63, 0xc9, 0x69, -- 0xe2, 0x02, 0xcf, 0x24, 0x2e, 0xe3, 0xe4, 0xe2, 0xaa, 0x6a, 0xe2, 0x2a, 0xce, 0x86, 0x9a, 0x76, -- 0x36, 0xb4, 0x1f, 0x19, 0xf0, 0xf4, 0xe4, 0xfe, 0x67, 0x90, 0xd4, 0x9b, 0x9a, 0xa4, 0x9a, 0x1e, -- 0xfa, 0x5f, 0x32, 0xc7, 0x90, 0xcc, 0x4f, 0x00, 0xce, 0xe7, 0x67, 0x10, 0x72, 0x20, 0x54, 0x30, -- 0x79, 0xcc, 0x28, 0xa2, 0x97, 0x04, 0x98, 0x16, 0xa3, 0xbe, 0x66, 0x81, 0xbe, 0x85, 0x0d, 0xd5, -- 0xcb, 0xb2, 0xf8, 0x8c, 0x96, 0xc5, 0x9c, 0x12, 0x1c, 0x5e, 0xea, 0xe2, 0x84, 0x13, 0xea, 0x7d, -- 0x20, 0x76, 0x71, 0x90, 0xda, 0xe7, 0x9e, 0x44, 0x91, 0xbc, 0x21, 0x2a, 0x9c, 0x08, 0xae, 0xfa, -- 0xa6, 0x9f, 0x7d, 0xa1, 0xfd, 0x23, 0x80, 0x2f, 0x8b, 0x8d, 0x0a, 0x6a, 0x0a, 0x55, 0x5c, 0x81, -- 0xf3, 0x34, 0x6b, 0x67, 0xba, 0x6e, 0x3b, 0x65, 0x5a, 0xa7, 0x50, 0xe9, 0xd5, 0xee, 0xa5, 0x36, -- 0xf0, 0x0b, 0x24, 0x5a, 0x2f, 0xd1, 0x68, 0x4c, 0xa3, 0x51, 0x40, 0x2a, 0x25, 0xe2, 0x7e, 0x37, -- 0x20, 0xda, 0x8c, 0xba, 0xe4, 0x96, 0x10, 0xdf, 0x58, 0xa7, 0xc3, 0x23, 0x3b, 0x3a, 0x3b, 0x26, -- 0xe5, 0xa8, 0xbd, 0xf7, 0xd1, 0x41, 0x6a, 0x5f, 0x7c, 0x12, 0x2b, 0x4f, 0x00, 0x6b, 0x2e, 0xe8, -- 0xc2, 0x35, 0x9e, 0xff, 0x73, 0xf1, 0x7b, 0x03, 0x2e, 0x7e, 0x13, 0x0f, 0x86, 0x21, 0xc9, 0x0b, -- 0xd4, 0x65, 0x58, 0xdb, 0xa5, 0x71, 0x28, 0x39, 0xab, 0x7a, 0xee, 0x14, 0xbd, 0x24, 0xc5, 0xd2, -- 0xee, 0x4e, 0x1c, 0x86, 0x71, 0xe4, 0xca, 0x47, 0x87, 0xac, 0x3c, 0xbe, 0x04, 0xa3, 0x4d, 0x38, -- 0xc7, 0xfb, 0x34, 0x1e, 0xf6, 0xfa, 0x32, 0x8a, 0x27, 
0x58, 0x27, 0xc7, 0x8b, 0x6b, 0xbd, 0x3c, -- 0x96, 0x05, 0xa1, 0xaa, 0x70, 0x15, 0xfd, 0xf1, 0xbd, 0x47, 0x14, 0xaf, 0xfa, 0xe4, 0xe3, 0xa0, -- 0xae, 0x3d, 0x0e, 0xda, 0xf0, 0x14, 0xc7, 0xb4, 0x47, 0xb8, 0xac, 0x68, 0xd9, 0xbd, 0xd2, 0x2f, -- 0x8d, 0xb5, 0x7f, 0x35, 0xe0, 0x52, 0xce, 0x45, 0x16, 0xcd, 0xf0, 0x88, 0x88, 0xcc, 0xb1, 0x88, -- 0xca, 0xb6, 0xde, 0xc5, 0x83, 0xd4, 0x5e, 0x3f, 0x96, 0x80, 0xca, 0xc0, 0x17, 0x57, 0x3c, 0x77, -- 0x0d, 0xb8, 0xb2, 0x15, 0x27, 0x9f, 0x5f, 0x97, 0x4f, 0x51, 0xed, 0x8c, 0x20, 0x47, 0x68, 0x5b, -- 0x19, 0xd3, 0x26, 0x10, 0x5f, 0x60, 0x4e, 0x83, 0x5b, 0xde, 0xfa, 0x41, 0x6a, 0xbb, 0xc7, 0xa2, -- 0x6c, 0x0c, 0x7a, 0x71, 0xe9, 0xfa, 0xad, 0x0a, 0x17, 0xbf, 0x16, 0xab, 0x14, 0x3c, 0xbd, 0x0f, -- 0x1b, 0x4c, 0x5e, 0xb2, 0x32, 0x96, 0xac, 0xc9, 0x07, 0x49, 0xf9, 0x3a, 0xb7, 0x51, 0xf1, 0x33, -- 0x7b, 0xf1, 0x4c, 0x1b, 0x28, 0x25, 0x1b, 0x47, 0xaa, 0xad, 0x33, 0xfd, 0xe4, 0x16, 0x68, 0x85, -- 0x41, 0x17, 0x60, 0x5d, 0x16, 0xd5, 0xec, 0x86, 0x5a, 0xfa, 0xec, 0xd1, 0xea, 0xb6, 0x51, 0xf1, -- 0x95, 0x39, 0x5a, 0x83, 0x35, 0x91, 0xb4, 0xd9, 0x1b, 0xfa, 0xec, 0xe4, 0x37, 0xf5, 0x13, 0x61, -- 0xa3, 0xe2, 0x4b, 0x5b, 0x74, 0x5e, 0x5c, 0x78, 0xc4, 0x51, 0x92, 0x9f, 0x8b, 0xe6, 0x24, 0x4c, -- 0x83, 0xe4, 0xa6, 0xe8, 0x3c, 0x6c, 0xdc, 0x94, 0x59, 0x22, 0x9f, 0x34, 0xe2, 0x9e, 0xa3, 0x81, -- 0xca, 0xf9, 0x23, 0xfc, 0x52, 0xb6, 0xe8, 0x2a, 0x3c, 0xc5, 0xe3, 0x64, 0x2f, 0xd7, 0xa3, 0x7c, -- 0xf6, 0x2c, 0xac, 0xb5, 0x74, 0xec, 0x34, 0xbd, 0x6e, 0x54, 0xfc, 0x12, 0xce, 0x83, 0x63, 0xfd, -- 0x7a, 0xe7, 0xf7, 0x1f, 0x58, 0x95, 0xfb, 0x0f, 0xac, 0xca, 0xa3, 0x07, 0x16, 0xf8, 0x6e, 0x64, -- 0x81, 0x9f, 0x47, 0x16, 0xb8, 0x37, 0xb2, 0xc0, 0xfe, 0xc8, 0x02, 0x7f, 0x8f, 0x2c, 0xf0, 0xcf, -- 0xc8, 0xaa, 0x3c, 0x1a, 0x59, 0xe0, 0xce, 0x43, 0xab, 0xb2, 0xff, 0xd0, 0xaa, 0xdc, 0x7f, 0x68, -- 0x55, 0xb6, 0x1b, 0x52, 0xb6, 0xeb, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xcf, 0xdd, 0x9a, -- 0x3d, 0x12, 0x00, 0x00, -+ // 1376 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0x1b, 0x45, -+ 0x18, 0xde, 0x5d, 0x7f, 0x24, 0x9e, 0x34, 0xa1, 0x4c, 0x42, 0xbb, 0x84, 0x6a, 0xd7, 0xb2, 0x04, -+ 0x35, 0x12, 0x78, 0x45, 0x52, 0x5a, 0xca, 0x97, 0xe8, 0xb6, 0x54, 0x89, 0x28, 0xa8, 0xdd, 0x46, -+ 0xdc, 0x27, 0xf1, 0xc4, 0x5e, 0xe2, 0xfd, 0xc8, 0xcc, 0xb8, 0x6a, 0x6e, 0x5c, 0x38, 0x20, 0x81, -+ 0xd4, 0x5f, 0x81, 0x90, 0xa8, 0xf8, 0x01, 0x48, 0x5c, 0x38, 0xf5, 0x98, 0x63, 0x15, 0x89, 0x85, -+ 0xba, 0x17, 0xc8, 0xa9, 0x3f, 0x01, 0xcd, 0xcc, 0xee, 0x7a, 0xd6, 0x76, 0x5b, 0x27, 0xbd, 0x34, -+ 0x12, 0x97, 0x78, 0x3e, 0xde, 0x67, 0x76, 0xde, 0x67, 0x9e, 0xf7, 0x7d, 0x67, 0x02, 0xce, 0xc7, -+ 0x3b, 0x1d, 0x67, 0xb7, 0x8f, 0x89, 0x8f, 0x89, 0xf8, 0xdd, 0x23, 0x28, 0xec, 0x60, 0xa5, 0xd9, -+ 0x8a, 0x49, 0xc4, 0x22, 0x08, 0x86, 0x23, 0xcb, 0x4b, 0x9d, 0xa8, 0x13, 0x89, 0x61, 0x87, 0xb7, -+ 0xa4, 0xc5, 0xb2, 0xdd, 0x89, 0xa2, 0x4e, 0x0f, 0x3b, 0xa2, 0xb7, 0xd9, 0xdf, 0x76, 0x98, 0x1f, -+ 0x60, 0xca, 0x50, 0x10, 0xa7, 0x06, 0x6f, 0xf0, 0x6f, 0xf5, 0xa2, 0x8e, 0x44, 0x66, 0x8d, 0x74, -+ 0xf2, 0xf5, 0xc2, 0x24, 0xdd, 0xc1, 0x6c, 0xab, 0x9b, 0x4e, 0xd5, 0xd3, 0xa9, 0xdd, 0x5e, 0x10, -+ 0xb5, 0x71, 0xcf, 0xa1, 0x0c, 0x31, 0x2a, 0xff, 0xa6, 0x16, 0x8b, 0xdc, 0x22, 0xee, 0xd3, 0xae, -+ 0xf8, 0x93, 0x0e, 0x5e, 0x7d, 0xae, 0x6b, 0x9b, 0x88, 0x62, 0xa7, 0x8d, 0xb7, 0xfd, 0xd0, 0x67, -+ 0x7e, 0x14, 0x52, 0xb5, 0x9d, 0x2e, 0x72, 0x71, 0xba, 0x45, 0x46, 0xe9, 0x6a, 0xec, 0x1b, 0x60, -+ 0xee, 0x46, 0xb4, 0xe3, 0x7b, 0x78, 0xb7, 0x8f, 0x29, 0x83, 0x4b, 
0xa0, 0x22, 0x6c, 0x4c, 0xbd, -+ 0xae, 0x37, 0x6b, 0x9e, 0xec, 0xf0, 0xd1, 0x9e, 0x1f, 0xf8, 0xcc, 0x34, 0xea, 0x7a, 0x73, 0xde, -+ 0x93, 0x1d, 0x08, 0x41, 0x99, 0x32, 0x1c, 0x9b, 0xa5, 0xba, 0xde, 0x2c, 0x79, 0xa2, 0x0d, 0x97, -+ 0xc1, 0xac, 0x1f, 0x32, 0x4c, 0xee, 0xa0, 0x9e, 0x59, 0x13, 0xe3, 0x79, 0x1f, 0x7e, 0x0a, 0x66, -+ 0x28, 0x43, 0x84, 0x6d, 0x50, 0xb3, 0x5c, 0xd7, 0x9b, 0x73, 0x2b, 0xcb, 0x2d, 0x79, 0x14, 0xad, -+ 0xec, 0x28, 0x5a, 0x1b, 0xd9, 0x51, 0xb8, 0xb3, 0x0f, 0x12, 0x5b, 0xbb, 0xf7, 0x97, 0xad, 0x7b, -+ 0x19, 0x08, 0x7e, 0x08, 0x2a, 0x38, 0x6c, 0x6f, 0x50, 0xb3, 0x72, 0x04, 0xb4, 0x84, 0xc0, 0xf7, -+ 0x40, 0xad, 0xed, 0x13, 0xbc, 0xc5, 0x39, 0x33, 0xab, 0x75, 0xbd, 0xb9, 0xb0, 0xb2, 0xd8, 0xca, -+ 0x8f, 0xf6, 0x5a, 0x36, 0xe5, 0x0d, 0xad, 0xb8, 0x7b, 0x31, 0x62, 0x5d, 0x73, 0x46, 0x30, 0x21, -+ 0xda, 0xb0, 0x01, 0xaa, 0xb4, 0x8b, 0x48, 0x9b, 0x9a, 0xb3, 0xf5, 0x52, 0xb3, 0xe6, 0x82, 0xc3, -+ 0xc4, 0x4e, 0x47, 0xbc, 0xf4, 0xb7, 0xf1, 0xaf, 0x0e, 0x20, 0xa7, 0x74, 0x3d, 0xa4, 0x0c, 0x85, -+ 0xec, 0x38, 0xcc, 0x7e, 0x0c, 0xaa, 0x5c, 0x94, 0x1b, 0x54, 0x70, 0x3b, 0xad, 0xab, 0x29, 0xa6, -+ 0xe8, 0x6b, 0xf9, 0x48, 0xbe, 0x56, 0x26, 0xfa, 0x5a, 0x7d, 0xaa, 0xaf, 0xbf, 0x94, 0xc1, 0x29, -+ 0x29, 0x1f, 0x1a, 0x47, 0x21, 0xc5, 0x1c, 0x74, 0x9b, 0x21, 0xd6, 0xa7, 0xd2, 0xcd, 0x14, 0x24, -+ 0x46, 0xbc, 0x74, 0x06, 0x7e, 0x06, 0xca, 0xd7, 0x10, 0x43, 0xc2, 0xe5, 0xb9, 0x95, 0xa5, 0x96, -+ 0x22, 0x4a, 0xbe, 0x16, 0x9f, 0x73, 0xcf, 0x70, 0xaf, 0x0e, 0x13, 0x7b, 0xa1, 0x8d, 0x18, 0x7a, -+ 0x27, 0x0a, 0x7c, 0x86, 0x83, 0x98, 0xed, 0x79, 0x02, 0x09, 0xdf, 0x07, 0xb5, 0xcf, 0x09, 0x89, -+ 0xc8, 0xc6, 0x5e, 0x8c, 0x05, 0x45, 0x35, 0xf7, 0xec, 0x61, 0x62, 0x2f, 0xe2, 0x6c, 0x50, 0x41, -+ 0x0c, 0x2d, 0xe1, 0xdb, 0xa0, 0x22, 0x3a, 0x82, 0x94, 0x9a, 0xbb, 0x78, 0x98, 0xd8, 0xaf, 0x08, -+ 0x88, 0x62, 0x2e, 0x2d, 0x8a, 0x1c, 0x56, 0xa6, 0xe2, 0x30, 0x3f, 0xca, 0xaa, 0x7a, 0x94, 0x26, -+ 0x98, 0xb9, 0x83, 0x09, 0xe5, 0xcb, 0xcc, 0x88, 0xf1, 0xac, 0x0b, 0xaf, 0x00, 0xc0, 0x89, 0xf1, -+ 0x29, 0xf3, 0xb7, 0xb8, 0x9e, 0x38, 0x19, 0xf3, 0x2d, 0x99, 0x2e, 0x3c, 0x4c, 0xfb, 0x3d, 0xe6, -+ 0xc2, 0x94, 0x05, 0xc5, 0xd0, 0x53, 0xda, 0xf0, 0xbe, 0x0e, 0x66, 0xd6, 0x30, 0x6a, 0x63, 0x42, -+ 0xcd, 0x5a, 0xbd, 0xd4, 0x9c, 0x5b, 0x79, 0xb3, 0xa5, 0xe6, 0x86, 0x9b, 0x24, 0x0a, 0x30, 0xeb, -+ 0xe2, 0x3e, 0xcd, 0x0e, 0x48, 0x5a, 0xbb, 0x3b, 0x07, 0x89, 0xbd, 0xd9, 0xf1, 0x59, 0xb7, 0xbf, -+ 0xd9, 0xda, 0x8a, 0x02, 0xa7, 0x43, 0xd0, 0x36, 0x0a, 0x91, 0xd3, 0x8b, 0x76, 0x7c, 0xe7, 0xc8, -+ 0xf9, 0xe8, 0xa9, 0xdf, 0x39, 0x4c, 0x6c, 0xfd, 0x5d, 0x2f, 0xdb, 0x62, 0xe3, 0x4f, 0x1d, 0xbc, -+ 0xca, 0x4f, 0xf8, 0x36, 0x5f, 0x9b, 0x2a, 0x81, 0x11, 0x20, 0xb6, 0xd5, 0x35, 0x75, 0x2e, 0x33, -+ 0x4f, 0x76, 0xd4, 0x64, 0x61, 0xbc, 0x50, 0xb2, 0x28, 0x1d, 0x3d, 0x59, 0x64, 0xd1, 0x50, 0x9e, -+ 0x18, 0x0d, 0x95, 0xa7, 0x46, 0xc3, 0x0f, 0x25, 0x19, 0xf9, 0x99, 0x7f, 0x47, 0x88, 0x89, 0xeb, -+ 0x79, 0x4c, 0x94, 0xc4, 0x6e, 0x73, 0xa9, 0xc9, 0xb5, 0xd6, 0xdb, 0x38, 0x64, 0xfe, 0xb6, 0x8f, -+ 0xc9, 0x73, 0x22, 0x43, 0x91, 0x5b, 0xa9, 0x28, 0x37, 0x55, 0x2b, 0xe5, 0x97, 0x5e, 0x2b, 0x23, -+ 0xd1, 0x51, 0x39, 0x46, 0x74, 0x34, 0x7e, 0xd7, 0xc1, 0x6b, 0xfc, 0x38, 0x6e, 0xa0, 0x4d, 0xdc, -+ 0xfb, 0x0a, 0x05, 0x43, 0xc9, 0x29, 0xe2, 0xd2, 0x5f, 0x48, 0x5c, 0xc6, 0xf1, 0xc5, 0x55, 0x52, -+ 0xc4, 0x95, 0xd7, 0x86, 0xb2, 0x52, 0x1b, 0x1a, 0x4f, 0x0c, 0x70, 0x66, 0x74, 0xff, 0x47, 0x90, -+ 0xd4, 0x5b, 0x8a, 0xa4, 0x6a, 0x2e, 0xfc, 0x5f, 0x32, 0x53, 0x48, 0xe6, 0x27, 0x1d, 0xcc, 0x66, -+ 0x35, 0x08, 0xb6, 0x00, 0x90, 0x30, 0x51, 0x66, 0x24, 
0xd1, 0x0b, 0x1c, 0x4c, 0xf2, 0x51, 0x4f, -+ 0xb1, 0x80, 0xdf, 0x80, 0xaa, 0xec, 0xa5, 0x51, 0x7c, 0x56, 0x89, 0x62, 0x46, 0x30, 0x0a, 0xae, -+ 0xb4, 0x51, 0xcc, 0x30, 0x71, 0x2f, 0xf3, 0x5d, 0x1c, 0x24, 0xf6, 0xf9, 0x67, 0x51, 0x24, 0x6e, -+ 0x88, 0x12, 0xc7, 0x0f, 0x57, 0x7e, 0xd3, 0x4b, 0xbf, 0xd0, 0xf8, 0x51, 0x07, 0xa7, 0xf9, 0x46, -+ 0x39, 0x35, 0xb9, 0x2a, 0xae, 0x81, 0x59, 0x92, 0xb6, 0x53, 0x5d, 0x37, 0x5a, 0x45, 0x5a, 0x27, -+ 0x50, 0xe9, 0x96, 0x1f, 0x24, 0xb6, 0xee, 0xe5, 0x48, 0xb8, 0x5a, 0xa0, 0xd1, 0x98, 0x44, 0x23, -+ 0x87, 0x68, 0x05, 0xe2, 0x7e, 0x33, 0x00, 0x5c, 0x0f, 0xdb, 0xf8, 0x2e, 0x17, 0xdf, 0x50, 0xa7, -+ 0xfd, 0xb1, 0x1d, 0x9d, 0x1b, 0x92, 0x32, 0x6e, 0xef, 0x7e, 0x74, 0x90, 0xd8, 0x97, 0x9e, 0xc5, -+ 0xca, 0x33, 0xc0, 0x8a, 0x0b, 0xaa, 0x70, 0x8d, 0x97, 0xbf, 0x2e, 0x7e, 0x6f, 0x80, 0xf9, 0xaf, -+ 0xa3, 0x5e, 0x3f, 0xc0, 0x59, 0x82, 0xba, 0x0a, 0xca, 0xdb, 0x24, 0x0a, 0x04, 0x67, 0x25, 0xd7, -+ 0x99, 0xa0, 0x97, 0x38, 0x5f, 0xda, 0xd9, 0x8a, 0x82, 0x20, 0x0a, 0x1d, 0xf1, 0xe8, 0x10, 0x99, -+ 0xc7, 0x13, 0x60, 0xb8, 0x0e, 0x66, 0x58, 0x97, 0x44, 0xfd, 0x4e, 0x57, 0x9c, 0xe2, 0x31, 0xd6, -+ 0xc9, 0xf0, 0xfc, 0x5a, 0x2f, 0xca, 0x32, 0x27, 0x54, 0x26, 0xae, 0xbc, 0x3f, 0xbc, 0xf7, 0xf0, -+ 0xe4, 0x55, 0x19, 0x7d, 0x1c, 0x54, 0x94, 0xc7, 0x41, 0x03, 0x9c, 0x62, 0x88, 0x74, 0x30, 0x13, -+ 0x19, 0x2d, 0xbd, 0x57, 0x7a, 0x85, 0xb1, 0xc6, 0xaf, 0x06, 0x58, 0xc8, 0xb8, 0x48, 0x4f, 0x33, -+ 0x18, 0x13, 0x91, 0x39, 0x14, 0x51, 0xd1, 0xd6, 0xbd, 0x74, 0x90, 0xd8, 0xab, 0x53, 0x09, 0xa8, -+ 0x08, 0x3c, 0xb9, 0xe2, 0xb9, 0x6f, 0x80, 0xa5, 0x8d, 0x28, 0xfe, 0xe2, 0xb6, 0x78, 0x8a, 0x2a, -+ 0x35, 0x02, 0x8f, 0xd1, 0xb6, 0x34, 0xa4, 0x8d, 0x23, 0xbe, 0x44, 0x8c, 0xf8, 0x77, 0xdd, 0xd5, -+ 0x83, 0xc4, 0x76, 0xa6, 0xa2, 0x6c, 0x08, 0x3a, 0xb9, 0x74, 0xfd, 0x61, 0x80, 0x33, 0xb7, 0xfa, -+ 0x28, 0x64, 0x7e, 0x0f, 0x4b, 0xca, 0x72, 0xc2, 0xf6, 0xc6, 0x08, 0xb3, 0x86, 0x84, 0x15, 0x31, -+ 0x29, 0x75, 0x9f, 0x1c, 0x24, 0xf6, 0xe5, 0xa9, 0xa8, 0x9b, 0x04, 0x3f, 0xb9, 0x24, 0x7e, 0x57, -+ 0x06, 0xf3, 0xb7, 0xf8, 0x2a, 0x39, 0x77, 0x1f, 0x80, 0x2a, 0x15, 0x37, 0xd5, 0x9c, 0xb9, 0x91, -+ 0x57, 0x5d, 0xf1, 0x4e, 0xbc, 0xa6, 0x79, 0xa9, 0x3d, 0x7f, 0xeb, 0xf6, 0x64, 0x3a, 0x30, 0xc6, -+ 0x4a, 0x56, 0x6b, 0xf2, 0xf5, 0x87, 0xa3, 0x25, 0x06, 0x5e, 0x04, 0x15, 0x51, 0x99, 0xd2, 0x6b, -+ 0x7e, 0xe1, 0xb3, 0xe3, 0x25, 0x62, 0x4d, 0xf3, 0xa4, 0x39, 0x5c, 0x01, 0x65, 0x9e, 0xf9, 0xd2, -+ 0x7f, 0x44, 0x9c, 0x1b, 0xfd, 0xa6, 0x5a, 0x56, 0xd7, 0x34, 0x4f, 0xd8, 0xc2, 0x0b, 0xfc, 0xd6, -+ 0xc8, 0xeb, 0x71, 0x76, 0xb9, 0x30, 0x47, 0x61, 0x0a, 0x24, 0x33, 0x85, 0x17, 0x40, 0xf5, 0x8e, -+ 0x48, 0x35, 0xe2, 0x5d, 0xc8, 0x2f, 0x8b, 0x0a, 0xa8, 0x98, 0x84, 0xb8, 0x5f, 0xd2, 0x16, 0x5e, -+ 0x07, 0xa7, 0x58, 0x14, 0xef, 0x64, 0x41, 0x2d, 0xde, 0x8e, 0x73, 0x2b, 0x75, 0x15, 0x3b, 0x29, -+ 0xe8, 0xd7, 0x34, 0xaf, 0x80, 0x83, 0x37, 0xc1, 0xe9, 0xdd, 0x82, 0xf4, 0x70, 0xf6, 0xd4, 0x2c, -+ 0xf0, 0x3c, 0x39, 0x22, 0xd6, 0x34, 0x6f, 0x0c, 0xed, 0x82, 0x61, 0x94, 0xb8, 0x17, 0xf6, 0x1f, -+ 0x59, 0xda, 0xc3, 0x47, 0x96, 0xf6, 0xe4, 0x91, 0xa5, 0x7f, 0x3b, 0xb0, 0xf4, 0x9f, 0x07, 0x96, -+ 0xfe, 0x60, 0x60, 0xe9, 0xfb, 0x03, 0x4b, 0xff, 0x7b, 0x60, 0xe9, 0xff, 0x0c, 0x2c, 0xed, 0xc9, -+ 0xc0, 0xd2, 0xef, 0x3d, 0xb6, 0xb4, 0xfd, 0xc7, 0x96, 0xf6, 0xf0, 0xb1, 0xa5, 0x6d, 0x56, 0x45, -+ 0x48, 0xac, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x13, 0x2b, 0x79, 0xd4, 0x13, 0x00, 0x00, - } - - func (this *LokiRequest) Equal(that interface{}) bool { -@@ -1672,6 +1726,42 @@ func (this *TopKSketchesResponse) Equal(that interface{}) 
bool { - } - return true - } -+func (this *QuantileSketchResponse) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*QuantileSketchResponse) -+ if !ok { -+ that2, ok := that.(QuantileSketchResponse) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if that1.Response == nil { -+ if this.Response != nil { -+ return false -+ } -+ } else if !this.Response.Equal(*that1.Response) { -+ return false -+ } -+ if len(this.Headers) != len(that1.Headers) { -+ return false -+ } -+ for i := range this.Headers { -+ if !this.Headers[i].Equal(that1.Headers[i]) { -+ return false -+ } -+ } -+ return true -+} - func (this *QueryResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil -@@ -1870,6 +1960,30 @@ func (this *QueryResponse_TopkSketches) Equal(that interface{}) bool { - } - return true - } -+func (this *QueryResponse_QuantileSketches) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*QueryResponse_QuantileSketches) -+ if !ok { -+ that2, ok := that.(QueryResponse_QuantileSketches) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if !this.QuantileSketches.Equal(that1.QuantileSketches) { -+ return false -+ } -+ return true -+} - func (this *LokiRequest) GoString() string { - if this == nil { - return ""nil"" -@@ -2054,11 +2168,22 @@ func (this *TopKSketchesResponse) GoString() string { - s = append(s, ""}"") - return strings.Join(s, """") - } -+func (this *QuantileSketchResponse) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&queryrange.QuantileSketchResponse{"") -+ s = append(s, ""Response: ""+fmt.Sprintf(""%#v"", this.Response)+"",\n"") -+ s = append(s, ""Headers: ""+fmt.Sprintf(""%#v"", this.Headers)+"",\n"") -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} - func (this *QueryResponse) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 11) -+ s := make([]string, 0, 12) - s = append(s, ""&queryrange.QueryResponse{"") - if this.Response != nil { - s = append(s, ""Response: ""+fmt.Sprintf(""%#v"", this.Response)+"",\n"") -@@ -2122,6 +2247,14 @@ func (this *QueryResponse_TopkSketches) GoString() string { - `TopkSketches:` + fmt.Sprintf(""%#v"", this.TopkSketches) + `}`}, "", "") - return s - } -+func (this *QueryResponse_QuantileSketches) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&queryrange.QueryResponse_QuantileSketches{` + -+ `QuantileSketches:` + fmt.Sprintf(""%#v"", this.QuantileSketches) + `}`}, "", "") -+ return s -+} - func valueToGoStringQueryrange(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { -@@ -2922,6 +3055,55 @@ func (m *TopKSketchesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - return len(dAtA) - i, nil - } - -+func (m *QuantileSketchResponse) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err -+ } -+ return dAtA[:n], nil -+} -+ -+func (m *QuantileSketchResponse) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *QuantileSketchResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i -+ var l int -+ _ = l -+ if len(m.Headers) > 0 { -+ for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size := m.Headers[iNdEx].Size() -+ i -= size -+ if _, err := m.Headers[iNdEx].MarshalTo(dAtA[i:]); err != nil { -+ return 0, err -+ } -+ i = encodeVarintQueryrange(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x12 -+ } -+ } -+ if m.Response != nil { -+ { -+ size := m.Response.Size() -+ i -= size -+ if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil { -+ return 0, err -+ } -+ i = encodeVarintQueryrange(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0xa -+ } -+ return len(dAtA) - i, nil -+} -+ - func (m *QueryResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) -@@ -3094,6 +3276,26 @@ func (m *QueryResponse_TopkSketches) MarshalToSizedBuffer(dAtA []byte) (int, err - } - return len(dAtA) - i, nil - } -+func (m *QueryResponse_QuantileSketches) MarshalTo(dAtA []byte) (int, error) { -+ return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -+} -+ -+func (m *QueryResponse_QuantileSketches) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ if m.QuantileSketches != nil { -+ { -+ size, err := m.QuantileSketches.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintQueryrange(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x42 -+ } -+ return len(dAtA) - i, nil -+} - func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int { - offset -= sovQueryrange(v) - base := offset -@@ -3447,6 +3649,25 @@ func (m *TopKSketchesResponse) Size() (n int) { - return n - } - -+func (m *QuantileSketchResponse) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.Response != nil { -+ l = m.Response.Size() -+ n += 1 + l + sovQueryrange(uint64(l)) -+ } -+ if len(m.Headers) > 0 { -+ for _, e := range m.Headers { -+ l = e.Size() -+ n += 1 + l + sovQueryrange(uint64(l)) -+ } -+ } -+ return n -+} -+ - func (m *QueryResponse) Size() (n int) { - if m == nil { - return 0 -@@ -3543,6 +3764,18 @@ func (m *QueryResponse_TopkSketches) Size() (n int) { - } - return n - } -+func (m *QueryResponse_QuantileSketches) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.QuantileSketches != nil { -+ l = m.QuantileSketches.Size() -+ n += 1 + l + sovQueryrange(uint64(l)) -+ } -+ return n -+} - - func sovQueryrange(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -@@ -3731,6 +3964,17 @@ func (this *TopKSketchesResponse) String() string { - }, """") - return s - } -+func (this *QuantileSketchResponse) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&QuantileSketchResponse{`, -+ `Response:` + fmt.Sprintf(""%v"", this.Response) + `,`, -+ `Headers:` + fmt.Sprintf(""%v"", this.Headers) + `,`, -+ `}`, -+ }, """") -+ return s -+} - func (this *QueryResponse) String() string { - if this == nil { - return ""nil"" -@@ -3811,6 +4055,16 @@ func (this *QueryResponse_TopkSketches) String() string { - }, """") - return s - } -+func (this *QueryResponse_QuantileSketches) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&QueryResponse_QuantileSketches{`, -+ `QuantileSketches:` + strings.Replace(fmt.Sprintf(""%v"", this.QuantileSketches), ""QuantileSketchResponse"", ""QuantileSketchResponse"", 1) + `,`, -+ `}`, -+ }, """") -+ return s -+} - func valueToStringQueryrange(v interface{}) string { - rv := 
reflect.ValueOf(v) - if rv.IsNil() { -@@ -6245,6 +6499,129 @@ func (m *TopKSketchesResponse) Unmarshal(dAtA []byte) error { - } - return nil - } -+func (m *QuantileSketchResponse) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowQueryrange -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: QuantileSketchResponse: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: QuantileSketchResponse: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Response"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowQueryrange -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ if m.Response == nil { -+ m.Response = &github_com_grafana_loki_pkg_logproto.QuantileSketchMatrix{} -+ } -+ if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ case 2: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Headers"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowQueryrange -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Headers = append(m.Headers, github_com_grafana_loki_pkg_querier_queryrange_queryrangebase_definitions.PrometheusResponseHeader{}) -+ if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipQueryrange(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} - func (m *QueryResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 -@@ -6519,6 +6896,41 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { - } - m.Response = &QueryResponse_TopkSketches{v} - iNdEx = postIndex -+ case 8: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field QuantileSketches"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowQueryrange -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] 
-+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthQueryrange -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ v := &QuantileSketchResponse{} -+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ m.Response = &QueryResponse_QuantileSketches{v} -+ iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) -diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto -index 4b03746a16160..32e39005b3928 100644 ---- a/pkg/querier/queryrange/queryrange.proto -+++ b/pkg/querier/queryrange/queryrange.proto -@@ -177,6 +177,14 @@ message TopKSketchesResponse { - ]; - } - -+message QuantileSketchResponse { -+ logproto.QuantileSketchMatrix response = 1 [(gogoproto.customtype) = ""github.com/grafana/loki/pkg/logproto.QuantileSketchMatrix""]; -+ repeated definitions.PrometheusResponseHeader Headers = 2 [ -+ (gogoproto.jsontag) = ""-"", -+ (gogoproto.customtype) = ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions.PrometheusResponseHeader"" -+ ]; -+} -+ - message QueryResponse { - oneof response { - LokiSeriesResponse series = 1; -@@ -186,5 +194,6 @@ message QueryResponse { - LokiResponse streams = 5; - VolumeResponse volume = 6; - TopKSketchesResponse topkSketches = 7; -+ QuantileSketchResponse quantileSketches = 8; - } - } -diff --git a/vendor/github.com/DataDog/sketches-go/LICENSE b/vendor/github.com/DataDog/sketches-go/LICENSE -new file mode 100644 -index 0000000000000..7d3693beef732 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/LICENSE -@@ -0,0 +1,13 @@ -+Copyright 2021 DataDog, Inc. -+ -+Licensed under the Apache License, Version 2.0 (the ""License""); -+you may not use this file except in compliance with the License. -+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an ""AS IS"" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. -diff --git a/vendor/github.com/DataDog/sketches-go/LICENSE-3rdparty.csv b/vendor/github.com/DataDog/sketches-go/LICENSE-3rdparty.csv -new file mode 100644 -index 0000000000000..db2f8cca0ab16 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/LICENSE-3rdparty.csv -@@ -0,0 +1,3 @@ -+Component,Origin,License -+import (test),github.com/google/gofuzz,Apache-2.0 -+import (test),github.com/stretchr/testify,MIT -diff --git a/vendor/github.com/DataDog/sketches-go/NOTICE b/vendor/github.com/DataDog/sketches-go/NOTICE -new file mode 100644 -index 0000000000000..7ae253a014430 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/NOTICE -@@ -0,0 +1,4 @@ -+Datadog sketches-go -+Copyright 2021 Datadog, Inc. -+ -+This product includes software developed at Datadog (https://www.datadoghq.com/). 
-diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go -new file mode 100644 -index 0000000000000..187c10f8adcf7 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go -@@ -0,0 +1,763 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package ddsketch -+ -+import ( -+ ""errors"" -+ ""io"" -+ ""math"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/mapping"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+ ""github.com/DataDog/sketches-go/ddsketch/stat"" -+ ""github.com/DataDog/sketches-go/ddsketch/store"" -+) -+ -+var ( -+ ErrUntrackableNaN = errors.New(""input value is NaN and cannot be tracked by the sketch"") -+ ErrUntrackableTooLow = errors.New(""input value is too low and cannot be tracked by the sketch"") -+ ErrUntrackableTooHigh = errors.New(""input value is too high and cannot be tracked by the sketch"") -+ ErrNegativeCount = errors.New(""count cannot be negative"") -+ errEmptySketch = errors.New(""no such element exists"") -+ errUnknownFlag = errors.New(""unknown encoding flag"") -+) -+ -+// Unexported to prevent usage and avoid the cost of dynamic dispatch -+type quantileSketch interface { -+ RelativeAccuracy() float64 -+ IsEmpty() bool -+ GetCount() float64 -+ GetZeroCount() float64 -+ GetSum() float64 -+ GetPositiveValueStore() store.Store -+ GetNegativeValueStore() store.Store -+ GetMinValue() (float64, error) -+ GetMaxValue() (float64, error) -+ GetValueAtQuantile(quantile float64) (float64, error) -+ GetValuesAtQuantiles(quantiles []float64) ([]float64, error) -+ ForEach(f func(value, count float64) (stop bool)) -+ Add(value float64) error -+ AddWithCount(value, count float64) error -+ // MergeWith -+ // ChangeMapping -+ Reweight(factor float64) error -+ Clear() -+ // Copy -+ Encode(b *[]byte, omitIndexMapping bool) -+ DecodeAndMergeWith(b []byte) error -+} -+ -+var _ quantileSketch = (*DDSketch)(nil) -+var _ quantileSketch = (*DDSketchWithExactSummaryStatistics)(nil) -+ -+type DDSketch struct { -+ mapping.IndexMapping -+ positiveValueStore store.Store -+ negativeValueStore store.Store -+ zeroCount float64 -+} -+ -+func NewDDSketchFromStoreProvider(indexMapping mapping.IndexMapping, storeProvider store.Provider) *DDSketch { -+ return NewDDSketch(indexMapping, storeProvider(), storeProvider()) -+} -+ -+func NewDDSketch(indexMapping mapping.IndexMapping, positiveValueStore store.Store, negativeValueStore store.Store) *DDSketch { -+ return &DDSketch{ -+ IndexMapping: indexMapping, -+ positiveValueStore: positiveValueStore, -+ negativeValueStore: negativeValueStore, -+ } -+} -+ -+func NewDefaultDDSketch(relativeAccuracy float64) (*DDSketch, error) { -+ m, err := mapping.NewDefaultMapping(relativeAccuracy) -+ if err != nil { -+ return nil, err -+ } -+ return NewDDSketchFromStoreProvider(m, store.DefaultProvider), nil -+} -+ -+// Constructs an instance of DDSketch that offers constant-time insertion and whose size grows indefinitely -+// to accommodate for the range of input values. 
-+func LogUnboundedDenseDDSketch(relativeAccuracy float64) (*DDSketch, error) { -+ indexMapping, err := mapping.NewLogarithmicMapping(relativeAccuracy) -+ if err != nil { -+ return nil, err -+ } -+ return NewDDSketch(indexMapping, store.NewDenseStore(), store.NewDenseStore()), nil -+} -+ -+// Constructs an instance of DDSketch that offers constant-time insertion and whose size grows until the -+// maximum number of bins is reached, at which point bins with lowest indices are collapsed, which causes the -+// relative accuracy guarantee to be lost on lowest quantiles if values are all positive, or the mid-range -+// quantiles for values closest to zero if values include negative numbers. -+func LogCollapsingLowestDenseDDSketch(relativeAccuracy float64, maxNumBins int) (*DDSketch, error) { -+ indexMapping, err := mapping.NewLogarithmicMapping(relativeAccuracy) -+ if err != nil { -+ return nil, err -+ } -+ return NewDDSketch(indexMapping, store.NewCollapsingLowestDenseStore(maxNumBins), store.NewCollapsingLowestDenseStore(maxNumBins)), nil -+} -+ -+// Constructs an instance of DDSketch that offers constant-time insertion and whose size grows until the -+// maximum number of bins is reached, at which point bins with highest indices are collapsed, which causes the -+// relative accuracy guarantee to be lost on highest quantiles if values are all positive, or the lowest and -+// highest quantiles if values include negative numbers. -+func LogCollapsingHighestDenseDDSketch(relativeAccuracy float64, maxNumBins int) (*DDSketch, error) { -+ indexMapping, err := mapping.NewLogarithmicMapping(relativeAccuracy) -+ if err != nil { -+ return nil, err -+ } -+ return NewDDSketch(indexMapping, store.NewCollapsingHighestDenseStore(maxNumBins), store.NewCollapsingHighestDenseStore(maxNumBins)), nil -+} -+ -+// Adds a value to the sketch. -+func (s *DDSketch) Add(value float64) error { -+ return s.AddWithCount(value, float64(1)) -+} -+ -+// Adds a value to the sketch with a float64 count. -+func (s *DDSketch) AddWithCount(value, count float64) error { -+ if count < 0 { -+ return ErrNegativeCount -+ } -+ -+ if value > s.MinIndexableValue() { -+ if value > s.MaxIndexableValue() { -+ return ErrUntrackableTooHigh -+ } -+ s.positiveValueStore.AddWithCount(s.Index(value), count) -+ } else if value < -s.MinIndexableValue() { -+ if value < -s.MaxIndexableValue() { -+ return ErrUntrackableTooLow -+ } -+ s.negativeValueStore.AddWithCount(s.Index(-value), count) -+ } else if math.IsNaN(value) { -+ return ErrUntrackableNaN -+ } else { -+ s.zeroCount += count -+ } -+ return nil -+} -+ -+// Return a (deep) copy of this sketch. -+func (s *DDSketch) Copy() *DDSketch { -+ return &DDSketch{ -+ IndexMapping: s.IndexMapping, -+ positiveValueStore: s.positiveValueStore.Copy(), -+ negativeValueStore: s.negativeValueStore.Copy(), -+ zeroCount: s.zeroCount, -+ } -+} -+ -+// Clear empties the sketch while allowing reusing already allocated memory. -+func (s *DDSketch) Clear() { -+ s.positiveValueStore.Clear() -+ s.negativeValueStore.Clear() -+ s.zeroCount = 0 -+} -+ -+// Return the value at the specified quantile. Return a non-nil error if the quantile is invalid -+// or if the sketch is empty. 
-+func (s *DDSketch) GetValueAtQuantile(quantile float64) (float64, error) { -+ if quantile < 0 || quantile > 1 { -+ return math.NaN(), errors.New(""The quantile must be between 0 and 1."") -+ } -+ -+ count := s.GetCount() -+ if count == 0 { -+ return math.NaN(), errEmptySketch -+ } -+ -+ rank := quantile * (count - 1) -+ negativeValueCount := s.negativeValueStore.TotalCount() -+ if rank < negativeValueCount { -+ return -s.Value(s.negativeValueStore.KeyAtRank(negativeValueCount - 1 - rank)), nil -+ } else if rank < s.zeroCount+negativeValueCount { -+ return 0, nil -+ } else { -+ return s.Value(s.positiveValueStore.KeyAtRank(rank - s.zeroCount - negativeValueCount)), nil -+ } -+} -+ -+// Return the values at the respective specified quantiles. Return a non-nil error if any of the quantiles -+// is invalid or if the sketch is empty. -+func (s *DDSketch) GetValuesAtQuantiles(quantiles []float64) ([]float64, error) { -+ values := make([]float64, len(quantiles)) -+ for i, q := range quantiles { -+ val, err := s.GetValueAtQuantile(q) -+ if err != nil { -+ return nil, err -+ } -+ values[i] = val -+ } -+ return values, nil -+} -+ -+// Return the total number of values that have been added to this sketch. -+func (s *DDSketch) GetCount() float64 { -+ return s.zeroCount + s.positiveValueStore.TotalCount() + s.negativeValueStore.TotalCount() -+} -+ -+// GetZeroCount returns the number of zero values that have been added to this sketch. -+// Note: values that are very small (lower than MinIndexableValue if positive, or higher than -MinIndexableValue if negative) -+// are also mapped to the zero bucket. -+func (s *DDSketch) GetZeroCount() float64 { -+ return s.zeroCount -+} -+ -+// Return true iff no value has been added to this sketch. -+func (s *DDSketch) IsEmpty() bool { -+ return s.zeroCount == 0 && s.positiveValueStore.IsEmpty() && s.negativeValueStore.IsEmpty() -+} -+ -+// Return the maximum value that has been added to this sketch. Return a non-nil error if the sketch -+// is empty. -+func (s *DDSketch) GetMaxValue() (float64, error) { -+ if !s.positiveValueStore.IsEmpty() { -+ maxIndex, _ := s.positiveValueStore.MaxIndex() -+ return s.Value(maxIndex), nil -+ } else if s.zeroCount > 0 { -+ return 0, nil -+ } else { -+ minIndex, err := s.negativeValueStore.MinIndex() -+ if err != nil { -+ return math.NaN(), err -+ } -+ return -s.Value(minIndex), nil -+ } -+} -+ -+// Return the minimum value that has been added to this sketch. Returns a non-nil error if the sketch -+// is empty. -+func (s *DDSketch) GetMinValue() (float64, error) { -+ if !s.negativeValueStore.IsEmpty() { -+ maxIndex, _ := s.negativeValueStore.MaxIndex() -+ return -s.Value(maxIndex), nil -+ } else if s.zeroCount > 0 { -+ return 0, nil -+ } else { -+ minIndex, err := s.positiveValueStore.MinIndex() -+ if err != nil { -+ return math.NaN(), err -+ } -+ return s.Value(minIndex), nil -+ } -+} -+ -+// GetSum returns an approximation of the sum of the values that have been added to the sketch. If the -+// values that have been added to the sketch all have the same sign, the approximation error has -+// the relative accuracy guarantees of the mapping used for this sketch. -+func (s *DDSketch) GetSum() (sum float64) { -+ s.ForEach(func(value float64, count float64) (stop bool) { -+ sum += value * count -+ return false -+ }) -+ return sum -+} -+ -+// GetPositiveValueStore returns the store.Store object that contains the positive -+// values of the sketch. 
-+func (s *DDSketch) GetPositiveValueStore() (store.Store) { -+ return s.positiveValueStore -+} -+ -+// GetNegativeValueStore returns the store.Store object that contains the negative -+// values of the sketch. -+func (s *DDSketch) GetNegativeValueStore() (store.Store) { -+ return s.negativeValueStore -+} -+ -+// ForEach applies f on the bins of the sketches until f returns true. -+// There is no guarantee on the bin iteration order. -+func (s *DDSketch) ForEach(f func(value, count float64) (stop bool)) { -+ if s.zeroCount != 0 && f(0, s.zeroCount) { -+ return -+ } -+ stopped := false -+ s.positiveValueStore.ForEach(func(index int, count float64) bool { -+ stopped = f(s.IndexMapping.Value(index), count) -+ return stopped -+ }) -+ if stopped { -+ return -+ } -+ s.negativeValueStore.ForEach(func(index int, count float64) bool { -+ return f(-s.IndexMapping.Value(index), count) -+ }) -+} -+ -+// Merges the other sketch into this one. After this operation, this sketch encodes the values that -+// were added to both this and the other sketches. -+func (s *DDSketch) MergeWith(other *DDSketch) error { -+ if !s.IndexMapping.Equals(other.IndexMapping) { -+ return errors.New(""Cannot merge sketches with different index mappings."") -+ } -+ s.positiveValueStore.MergeWith(other.positiveValueStore) -+ s.negativeValueStore.MergeWith(other.negativeValueStore) -+ s.zeroCount += other.zeroCount -+ return nil -+} -+ -+// Generates a protobuf representation of this DDSketch. -+func (s *DDSketch) ToProto() *sketchpb.DDSketch { -+ return &sketchpb.DDSketch{ -+ Mapping: s.IndexMapping.ToProto(), -+ PositiveValues: s.positiveValueStore.ToProto(), -+ NegativeValues: s.negativeValueStore.ToProto(), -+ ZeroCount: s.zeroCount, -+ } -+} -+ -+// FromProto builds a new instance of DDSketch based on the provided protobuf representation, using a Dense store. -+func FromProto(pb *sketchpb.DDSketch) (*DDSketch, error) { -+ return FromProtoWithStoreProvider(pb, store.DenseStoreConstructor) -+} -+ -+func FromProtoWithStoreProvider(pb *sketchpb.DDSketch, storeProvider store.Provider) (*DDSketch, error) { -+ positiveValueStore := storeProvider() -+ store.MergeWithProto(positiveValueStore, pb.PositiveValues) -+ negativeValueStore := storeProvider() -+ store.MergeWithProto(negativeValueStore, pb.NegativeValues) -+ m, err := mapping.FromProto(pb.Mapping) -+ if err != nil { -+ return nil, err -+ } -+ return &DDSketch{ -+ IndexMapping: m, -+ positiveValueStore: positiveValueStore, -+ negativeValueStore: negativeValueStore, -+ zeroCount: pb.ZeroCount, -+ }, nil -+} -+ -+// Encode serializes the sketch and appends the serialized content to the provided []byte. -+// If the capacity of the provided []byte is large enough, Encode does not allocate memory space. -+// When the index mapping is known at the time of deserialization, omitIndexMapping can be set to true to avoid encoding it and to make the serialized content smaller. -+// The encoding format is described in the encoding/flag module. -+func (s *DDSketch) Encode(b *[]byte, omitIndexMapping bool) { -+ if s.zeroCount != 0 { -+ enc.EncodeFlag(b, enc.FlagZeroCountVarFloat) -+ enc.EncodeVarfloat64(b, s.zeroCount) -+ } -+ -+ if !omitIndexMapping { -+ s.IndexMapping.Encode(b) -+ } -+ -+ s.positiveValueStore.Encode(b, enc.FlagTypePositiveStore) -+ s.negativeValueStore.Encode(b, enc.FlagTypeNegativeStore) -+} -+ -+// DecodeDDSketch deserializes a sketch. -+// Stores are built using storeProvider. The store type needs not match the -+// store that the serialized sketch initially used. 
However, using the same -+// store type may make decoding faster. In the absence of high performance -+// requirements, store.BufferedPaginatedStoreConstructor is a sound enough -+// choice of store provider. -+// To avoid memory allocations, it is possible to use a store provider that -+// reuses stores, by calling Clear() on previously used stores before providing -+// the store. -+// If the serialized data does not contain the index mapping, you need to -+// specify the index mapping that was used in the sketch that was encoded. -+// Otherwise, you can use nil and the index mapping will be decoded from the -+// serialized data. -+// It is possible to decode with this function an encoded -+// DDSketchWithExactSummaryStatistics, but the exact summary statistics will be -+// lost. -+func DecodeDDSketch(b []byte, storeProvider store.Provider, indexMapping mapping.IndexMapping) (*DDSketch, error) { -+ s := &DDSketch{ -+ IndexMapping: indexMapping, -+ positiveValueStore: storeProvider(), -+ negativeValueStore: storeProvider(), -+ zeroCount: float64(0), -+ } -+ err := s.DecodeAndMergeWith(b) -+ return s, err -+} -+ -+// DecodeAndMergeWith deserializes a sketch and merges its content in the -+// receiver sketch. -+// If the serialized content contains an index mapping that differs from the one -+// of the receiver, DecodeAndMergeWith returns an error. -+func (s *DDSketch) DecodeAndMergeWith(bb []byte) error { -+ return s.decodeAndMergeWith(bb, func(b *[]byte, flag enc.Flag) error { -+ switch flag { -+ case enc.FlagCount, enc.FlagSum, enc.FlagMin, enc.FlagMax: -+ // Exact summary stats are ignored. -+ if len(*b) < 8 { -+ return io.EOF -+ } -+ *b = (*b)[8:] -+ return nil -+ default: -+ return errUnknownFlag -+ } -+ }) -+} -+ -+func (s *DDSketch) decodeAndMergeWith(bb []byte, fallbackDecode func(b *[]byte, flag enc.Flag) error) error { -+ b := &bb -+ for len(*b) > 0 { -+ flag, err := enc.DecodeFlag(b) -+ if err != nil { -+ return err -+ } -+ switch flag.Type() { -+ case enc.FlagTypePositiveStore: -+ s.positiveValueStore.DecodeAndMergeWith(b, flag.SubFlag()) -+ case enc.FlagTypeNegativeStore: -+ s.negativeValueStore.DecodeAndMergeWith(b, flag.SubFlag()) -+ case enc.FlagTypeIndexMapping: -+ decodedIndexMapping, err := mapping.Decode(b, flag) -+ if err != nil { -+ return err -+ } -+ if s.IndexMapping != nil && !s.IndexMapping.Equals(decodedIndexMapping) { -+ return errors.New(""index mapping mismatch"") -+ } -+ s.IndexMapping = decodedIndexMapping -+ default: -+ switch flag { -+ -+ case enc.FlagZeroCountVarFloat: -+ decodedZeroCount, err := enc.DecodeVarfloat64(b) -+ if err != nil { -+ return err -+ } -+ s.zeroCount += decodedZeroCount -+ -+ default: -+ err := fallbackDecode(b, flag) -+ if err != nil { -+ return err -+ } -+ } -+ } -+ } -+ -+ if s.IndexMapping == nil { -+ return errors.New(""missing index mapping"") -+ } -+ return nil -+} -+ -+// ChangeMapping changes the store to a new mapping. -+// it doesn't change s but returns a newly created sketch. -+// positiveStore and negativeStore must be different stores, and be empty when the function is called. -+// It is not the conversion that minimizes the loss in relative -+// accuracy, but it avoids artefacts like empty bins that make the histograms look bad. -+// scaleFactor allows to scale out / in all values. 
(changing units for eg) -+func (s *DDSketch) ChangeMapping(newMapping mapping.IndexMapping, positiveStore store.Store, negativeStore store.Store, scaleFactor float64) *DDSketch { -+ if scaleFactor == 1 && s.IndexMapping.Equals(newMapping) { -+ return s.Copy() -+ } -+ changeStoreMapping(s.IndexMapping, newMapping, s.positiveValueStore, positiveStore, scaleFactor) -+ changeStoreMapping(s.IndexMapping, newMapping, s.negativeValueStore, negativeStore, scaleFactor) -+ newSketch := NewDDSketch(newMapping, positiveStore, negativeStore) -+ newSketch.zeroCount = s.zeroCount -+ return newSketch -+} -+ -+func changeStoreMapping(oldMapping, newMapping mapping.IndexMapping, oldStore, newStore store.Store, scaleFactor float64) { -+ oldStore.ForEach(func(index int, count float64) (stop bool) { -+ inLowerBound := oldMapping.LowerBound(index) * scaleFactor -+ inHigherBound := oldMapping.LowerBound(index+1) * scaleFactor -+ inSize := inHigherBound - inLowerBound -+ for outIndex := newMapping.Index(inLowerBound); newMapping.LowerBound(outIndex) < inHigherBound; outIndex++ { -+ outLowerBound := newMapping.LowerBound(outIndex) -+ outHigherBound := newMapping.LowerBound(outIndex + 1) -+ lowerIntersectionBound := math.Max(outLowerBound, inLowerBound) -+ higherIntersectionBound := math.Min(outHigherBound, inHigherBound) -+ intersectionSize := higherIntersectionBound - lowerIntersectionBound -+ proportion := intersectionSize / inSize -+ newStore.AddWithCount(outIndex, proportion*count) -+ } -+ return false -+ }) -+} -+ -+// Reweight multiplies all values from the sketch by w, but keeps the same global distribution. -+// w has to be strictly greater than 0. -+func (s *DDSketch) Reweight(w float64) error { -+ if w <= 0 { -+ return errors.New(""can't reweight by a negative factor"") -+ } -+ if w == 1 { -+ return nil -+ } -+ s.zeroCount *= w -+ if err := s.positiveValueStore.Reweight(w); err != nil { -+ return err -+ } -+ if err := s.negativeValueStore.Reweight(w); err != nil { -+ return err -+ } -+ return nil -+} -+ -+// DDSketchWithExactSummaryStatistics returns exact count, sum, min and max, as -+// opposed to DDSketch, which may return approximate values for those -+// statistics. Because of the need to track them exactly, adding and merging -+// operations are slightly more exepensive than those of DDSketch. -+type DDSketchWithExactSummaryStatistics struct { -+ *DDSketch -+ summaryStatistics *stat.SummaryStatistics -+} -+ -+func NewDefaultDDSketchWithExactSummaryStatistics(relativeAccuracy float64) (*DDSketchWithExactSummaryStatistics, error) { -+ sketch, err := NewDefaultDDSketch(relativeAccuracy) -+ if err != nil { -+ return nil, err -+ } -+ return &DDSketchWithExactSummaryStatistics{ -+ DDSketch: sketch, -+ summaryStatistics: stat.NewSummaryStatistics(), -+ }, nil -+} -+ -+func NewDDSketchWithExactSummaryStatistics(mapping mapping.IndexMapping, storeProvider store.Provider) *DDSketchWithExactSummaryStatistics { -+ return &DDSketchWithExactSummaryStatistics{ -+ DDSketch: NewDDSketchFromStoreProvider(mapping, storeProvider), -+ summaryStatistics: stat.NewSummaryStatistics(), -+ } -+} -+ -+// NewDDSketchWithExactSummaryStatisticsFromData constructs DDSketchWithExactSummaryStatistics from the provided sketch and exact summary statistics. 
-+func NewDDSketchWithExactSummaryStatisticsFromData(sketch *DDSketch, summaryStatistics *stat.SummaryStatistics) (*DDSketchWithExactSummaryStatistics, error) { -+ if sketch.IsEmpty() != (summaryStatistics.Count() == 0) { -+ return nil, errors.New(""sketch and summary statistics do not match"") -+ } -+ return &DDSketchWithExactSummaryStatistics{ -+ DDSketch: sketch, -+ summaryStatistics: summaryStatistics, -+ }, nil -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) IsEmpty() bool { -+ return s.summaryStatistics.Count() == 0 -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) GetCount() float64 { -+ return s.summaryStatistics.Count() -+} -+ -+// GetZeroCount returns the number of zero values that have been added to this sketch. -+// Note: values that are very small (lower than MinIndexableValue if positive, or higher than -MinIndexableValue if negative) -+// are also mapped to the zero bucket. -+func (s *DDSketchWithExactSummaryStatistics) GetZeroCount() float64 { -+ return s.DDSketch.zeroCount -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) GetSum() float64 { -+ return s.summaryStatistics.Sum() -+} -+ -+// GetPositiveValueStore returns the store.Store object that contains the positive -+// values of the sketch. -+func (s *DDSketchWithExactSummaryStatistics) GetPositiveValueStore() (store.Store) { -+ return s.DDSketch.positiveValueStore -+} -+ -+// GetNegativeValueStore returns the store.Store object that contains the negative -+// values of the sketch. -+func (s *DDSketchWithExactSummaryStatistics) GetNegativeValueStore() (store.Store) { -+ return s.DDSketch.negativeValueStore -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) GetMinValue() (float64, error) { -+ if s.DDSketch.IsEmpty() { -+ return math.NaN(), errEmptySketch -+ } -+ return s.summaryStatistics.Min(), nil -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) GetMaxValue() (float64, error) { -+ if s.DDSketch.IsEmpty() { -+ return math.NaN(), errEmptySketch -+ } -+ return s.summaryStatistics.Max(), nil -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) GetValueAtQuantile(quantile float64) (float64, error) { -+ value, err := s.DDSketch.GetValueAtQuantile(quantile) -+ min := s.summaryStatistics.Min() -+ if value < min { -+ return min, err -+ } -+ max := s.summaryStatistics.Max() -+ if value > max { -+ return max, err -+ } -+ return value, err -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) GetValuesAtQuantiles(quantiles []float64) ([]float64, error) { -+ values, err := s.DDSketch.GetValuesAtQuantiles(quantiles) -+ min := s.summaryStatistics.Min() -+ max := s.summaryStatistics.Max() -+ for i := range values { -+ if values[i] < min { -+ values[i] = min -+ } else if values[i] > max { -+ values[i] = max -+ } -+ } -+ return values, err -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) ForEach(f func(value, count float64) (stop bool)) { -+ s.DDSketch.ForEach(f) -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) Clear() { -+ s.DDSketch.Clear() -+ s.summaryStatistics.Clear() -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) Add(value float64) error { -+ err := s.DDSketch.Add(value) -+ if err != nil { -+ return err -+ } -+ s.summaryStatistics.Add(value, 1) -+ return nil -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) AddWithCount(value, count float64) error { -+ if count == 0 { -+ return nil -+ } -+ err := s.DDSketch.AddWithCount(value, count) -+ if err != nil { -+ return err -+ } -+ s.summaryStatistics.Add(value, count) -+ return nil -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) 
MergeWith(o *DDSketchWithExactSummaryStatistics) error { -+ err := s.DDSketch.MergeWith(o.DDSketch) -+ if err != nil { -+ return err -+ } -+ s.summaryStatistics.MergeWith(o.summaryStatistics) -+ return nil -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) Copy() *DDSketchWithExactSummaryStatistics { -+ return &DDSketchWithExactSummaryStatistics{ -+ DDSketch: s.DDSketch.Copy(), -+ summaryStatistics: s.summaryStatistics.Copy(), -+ } -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) Reweight(factor float64) error { -+ err := s.DDSketch.Reweight(factor) -+ if err != nil { -+ return err -+ } -+ s.summaryStatistics.Reweight(factor) -+ return nil -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) ChangeMapping(newMapping mapping.IndexMapping, storeProvider store.Provider, scaleFactor float64) *DDSketchWithExactSummaryStatistics { -+ summaryStatisticsCopy := s.summaryStatistics.Copy() -+ summaryStatisticsCopy.Rescale(scaleFactor) -+ return &DDSketchWithExactSummaryStatistics{ -+ DDSketch: s.DDSketch.ChangeMapping(newMapping, storeProvider(), storeProvider(), scaleFactor), -+ summaryStatistics: summaryStatisticsCopy, -+ } -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) Encode(b *[]byte, omitIndexMapping bool) { -+ if s.summaryStatistics.Count() != 0 { -+ enc.EncodeFlag(b, enc.FlagCount) -+ enc.EncodeVarfloat64(b, s.summaryStatistics.Count()) -+ } -+ if s.summaryStatistics.Sum() != 0 { -+ enc.EncodeFlag(b, enc.FlagSum) -+ enc.EncodeFloat64LE(b, s.summaryStatistics.Sum()) -+ } -+ if s.summaryStatistics.Min() != math.Inf(1) { -+ enc.EncodeFlag(b, enc.FlagMin) -+ enc.EncodeFloat64LE(b, s.summaryStatistics.Min()) -+ } -+ if s.summaryStatistics.Max() != math.Inf(-1) { -+ enc.EncodeFlag(b, enc.FlagMax) -+ enc.EncodeFloat64LE(b, s.summaryStatistics.Max()) -+ } -+ s.DDSketch.Encode(b, omitIndexMapping) -+} -+ -+// DecodeDDSketchWithExactSummaryStatistics deserializes a sketch. -+// Stores are built using storeProvider. The store type needs not match the -+// store that the serialized sketch initially used. However, using the same -+// store type may make decoding faster. In the absence of high performance -+// requirements, store.DefaultProvider is a sound enough choice of store -+// provider. -+// To avoid memory allocations, it is possible to use a store provider that -+// reuses stores, by calling Clear() on previously used stores before providing -+// the store. -+// If the serialized data does not contain the index mapping, you need to -+// specify the index mapping that was used in the sketch that was encoded. -+// Otherwise, you can use nil and the index mapping will be decoded from the -+// serialized data. 
-+// It is not possible to decode with this function an encoded DDSketch (unless -+// it is empty), because it does not track exact summary statistics -+func DecodeDDSketchWithExactSummaryStatistics(b []byte, storeProvider store.Provider, indexMapping mapping.IndexMapping) (*DDSketchWithExactSummaryStatistics, error) { -+ s := &DDSketchWithExactSummaryStatistics{ -+ DDSketch: &DDSketch{ -+ IndexMapping: indexMapping, -+ positiveValueStore: storeProvider(), -+ negativeValueStore: storeProvider(), -+ zeroCount: float64(0), -+ }, -+ summaryStatistics: stat.NewSummaryStatistics(), -+ } -+ err := s.DecodeAndMergeWith(b) -+ return s, err -+} -+ -+func (s *DDSketchWithExactSummaryStatistics) DecodeAndMergeWith(bb []byte) error { -+ err := s.DDSketch.decodeAndMergeWith(bb, func(b *[]byte, flag enc.Flag) error { -+ switch flag { -+ case enc.FlagCount: -+ count, err := enc.DecodeVarfloat64(b) -+ if err != nil { -+ return err -+ } -+ s.summaryStatistics.AddToCount(count) -+ return nil -+ case enc.FlagSum: -+ sum, err := enc.DecodeFloat64LE(b) -+ if err != nil { -+ return err -+ } -+ s.summaryStatistics.AddToSum(sum) -+ return nil -+ case enc.FlagMin, enc.FlagMax: -+ stat, err := enc.DecodeFloat64LE(b) -+ if err != nil { -+ return err -+ } -+ s.summaryStatistics.Add(stat, 0) -+ return nil -+ default: -+ return errUnknownFlag -+ } -+ }) -+ if err != nil { -+ return err -+ } -+ // It is assumed that if the count is encoded, other exact summary -+ // statistics are encoded as well, which is the case if Encode is used. -+ if s.summaryStatistics.Count() == 0 && !s.DDSketch.IsEmpty() { -+ return errors.New(""missing exact summary statistics"") -+ } -+ return nil -+} -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/encoding.go b/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/encoding.go -new file mode 100644 -index 0000000000000..c50dc1adb9cc4 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/encoding/encoding.go -@@ -0,0 +1,208 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package encoding -+ -+import ( -+ ""encoding/binary"" -+ ""errors"" -+ ""io"" -+ ""math"" -+ ""math/bits"" -+) -+ -+// Encoding functions append bytes to the provided *[]byte, allowing avoiding -+// allocations if the slice initially has a large enough capacity. -+// Decoding functions also take *[]byte as input, and when they do not return an -+// error, advance the slice so that it starts at the immediate byte after the -+// decoded part (or so that it is empty if there is no such byte). -+ -+const ( -+ MaxVarLen64 = 9 -+ varfloat64Rotate = 6 -+) -+ -+var uvarint64Sizes = initUvarint64Sizes() -+var varfloat64Sizes = initVarfloat64Sizes() -+ -+// EncodeUvarint64 serializes 64-bit unsigned integers 7 bits at a time, -+// starting with the least significant bits. The most significant bit in each -+// output byte is the continuation bit and indicates whether there are -+// additional non-zero bits encoded in following bytes. There are at most 9 -+// output bytes and the last one does not have a continuation bit, allowing for -+// it to encode 8 bits (8*7+8 = 64). 
-+func EncodeUvarint64(b *[]byte, v uint64) { -+ for i := 0; i < MaxVarLen64-1; i++ { -+ if v < 0x80 { -+ break -+ } -+ *b = append(*b, byte(v)|byte(0x80)) -+ v >>= 7 -+ } -+ *b = append(*b, byte(v)) -+} -+ -+// DecodeUvarint64 deserializes 64-bit unsigned integers that have been encoded -+// using EncodeUvarint64. -+func DecodeUvarint64(b *[]byte) (uint64, error) { -+ x := uint64(0) -+ s := uint(0) -+ for i := 0; ; i++ { -+ if len(*b) <= i { -+ return 0, io.EOF -+ } -+ n := (*b)[i] -+ if n < 0x80 || i == MaxVarLen64-1 { -+ *b = (*b)[i+1:] -+ return x | uint64(n)<>i) -+ sizes[i] = len(b) -+ } -+ return sizes -+} -+ -+// EncodeVarint64 serializes 64-bit signed integers using zig-zag encoding, -+// which ensures small-scale integers are turned into unsigned integers that -+// have leading zeros, whether they are positive or negative, hence allows for -+// space-efficient varuint encoding of those values. -+func EncodeVarint64(b *[]byte, v int64) { -+ EncodeUvarint64(b, uint64(v>>(64-1)^(v<<1))) -+} -+ -+// DecodeVarint64 deserializes 64-bit signed integers that have been encoded -+// using EncodeVarint32. -+func DecodeVarint64(b *[]byte) (int64, error) { -+ v, err := DecodeUvarint64(b) -+ return int64((v >> 1) ^ -(v & 1)), err -+} -+ -+// Varint64Size returns the number of bytes that EncodeVarint64 encodes a 64-bit -+// signed integer into. -+func Varint64Size(v int64) int { -+ return Uvarint64Size(uint64(v>>(64-1) ^ (v << 1))) -+} -+ -+var errVarint32Overflow = errors.New(""varint overflows a 32-bit integer"") -+ -+// DecodeVarint32 deserializes 32-bit signed integers that have been encoded -+// using EncodeVarint64. -+func DecodeVarint32(b *[]byte) (int32, error) { -+ v, err := DecodeVarint64(b) -+ if err != nil { -+ return 0, err -+ } -+ if v > math.MaxInt32 || v < math.MinInt32 { -+ return 0, errVarint32Overflow -+ } -+ return int32(v), nil -+} -+ -+// EncodeFloat64LE serializes 64-bit floating-point values, starting with the -+// least significant bytes. -+func EncodeFloat64LE(b *[]byte, v float64) { -+ *b = append(*b, make([]byte, 8)...) -+ binary.LittleEndian.PutUint64((*b)[len(*b)-8:], math.Float64bits(v)) -+} -+ -+// DecodeFloat64LE deserializes 64-bit floating-point values that have been -+// encoded with EncodeFloat64LE. -+func DecodeFloat64LE(b *[]byte) (float64, error) { -+ if len(*b) < 8 { -+ return 0, io.EOF -+ } -+ v := math.Float64frombits(binary.LittleEndian.Uint64(*b)) -+ *b = (*b)[8:] -+ return v, nil -+} -+ -+// EncodeVarfloat64 serializes 64-bit floating-point values using a method that -+// is similar to the varuint encoding and that is space-efficient for -+// non-negative integer values. The output takes at most 9 bytes. -+// Input values are first shifted as floating-point values (+1), then transmuted -+// to integer values, then shifted again as integer values (-Float64bits(1)). -+// That is in order to minimize the number of non-zero bits when dealing with -+// non-negative integer values. -+// After that transformation, any input integer value no greater than 2^53 (the -+// largest integer value that can be encoded exactly as a 64-bit floating-point -+// value) will have at least 6 leading zero bits. By rotating bits to the left, -+// those bits end up at the right of the binary representation. -+// The resulting bits are then encoded similarly to the varuint method, but -+// starting with the most significant bits. 
-+func EncodeVarfloat64(b *[]byte, v float64) { -+ x := bits.RotateLeft64(math.Float64bits(v+1)-math.Float64bits(1), varfloat64Rotate) -+ for i := 0; i < MaxVarLen64-1; i++ { -+ n := byte(x >> (8*8 - 7)) -+ x <<= 7 -+ if x == 0 { -+ *b = append(*b, n) -+ return -+ } -+ *b = append(*b, n|byte(0x80)) -+ } -+ n := byte(x >> (8 * 7)) -+ *b = append(*b, n) -+} -+ -+// DecodeVarfloat64 deserializes 64-bit floating-point values that have been -+// encoded with EncodeVarfloat64. -+func DecodeVarfloat64(b *[]byte) (float64, error) { -+ x := uint64(0) -+ i := int(0) -+ s := uint(8*8 - 7) -+ for { -+ if len(*b) <= i { -+ return 0, io.EOF -+ } -+ n := (*b)[i] -+ if i == MaxVarLen64-1 { -+ x |= uint64(n) -+ break -+ } -+ if n < 0x80 { -+ x |= uint64(n) << s -+ break -+ } -+ x |= uint64(n&0x7F) << s -+ i++ -+ s -= 7 -+ } -+ *b = (*b)[i+1:] -+ return math.Float64frombits(bits.RotateLeft64(x, -varfloat64Rotate)+math.Float64bits(1)) - 1, nil -+} -+ -+// Varfloat64Size returns the number of bytes that EncodeVarfloat64 encodes a -+// 64-bit floating-point value into. -+func Varfloat64Size(v float64) int { -+ x := bits.RotateLeft64(math.Float64bits(v+1)-math.Float64bits(1), varfloat64Rotate) -+ return varfloat64Sizes[bits.TrailingZeros64(x)] -+} -+ -+func initVarfloat64Sizes() [65]int { -+ var sizes [65]int -+ b := []byte{} -+ for i := 0; i <= 64; i++ { -+ b = b[:0] -+ EncodeVarfloat64(&b, math.Float64frombits(bits.RotateLeft64(^uint64(0)<>exponentShift) - exponentBias) -+} -+ -+func getSignificandPlusOne(float64Bits uint64) float64 { -+ return math.Float64frombits((float64Bits & significandMask) | oneMask) -+} -+ -+// exponent should be >= -1022 and <= 1023 -+// significandPlusOne should be >= 1 and < 2 -+func buildFloat64(exponent int, significandPlusOne float64) float64 { -+ return math.Float64frombits( -+ (uint64((exponent+exponentBias)<sketches-java -+type CubicallyInterpolatedMapping struct { -+ gamma float64 // base -+ indexOffset float64 -+ multiplier float64 // precomputed for performance -+ minIndexableValue float64 -+ maxIndexableValue float64 -+} -+ -+func NewCubicallyInterpolatedMapping(relativeAccuracy float64) (*CubicallyInterpolatedMapping, error) { -+ if relativeAccuracy <= 0 || relativeAccuracy >= 1 { -+ return nil, errors.New(""The relative accuracy must be between 0 and 1."") -+ } -+ gamma := math.Pow((1+relativeAccuracy)/(1-relativeAccuracy), 10*math.Ln2/7) // > 1 -+ m, _ := NewCubicallyInterpolatedMappingWithGamma(gamma, 0) -+ return m, nil -+} -+ -+func NewCubicallyInterpolatedMappingWithGamma(gamma, indexOffset float64) (*CubicallyInterpolatedMapping, error) { -+ if gamma <= 1 { -+ return nil, errors.New(""Gamma must be greater than 1."") -+ } -+ multiplier := 1 / math.Log2(gamma) -+ adjustedGamma := math.Pow(gamma, 7/(10*math.Ln2)) -+ m := CubicallyInterpolatedMapping{ -+ gamma: gamma, -+ indexOffset: indexOffset, -+ multiplier: multiplier, -+ minIndexableValue: math.Max( -+ math.Exp2((math.MinInt32-indexOffset)/multiplier+1), // so that index >= MinInt32 -+ minNormalFloat64*adjustedGamma, -+ ), -+ maxIndexableValue: math.Min( -+ math.Exp2((math.MaxInt32-indexOffset)/multiplier-1), // so that index <= MaxInt32 -+ math.Exp(expOverflow)/(2*adjustedGamma)*(adjustedGamma+1), // so that math.Exp does not overflow -+ ), -+ } -+ return &m, nil -+} -+ -+func (m *CubicallyInterpolatedMapping) Equals(other IndexMapping) bool { -+ o, ok := other.(*CubicallyInterpolatedMapping) -+ if !ok { -+ return false -+ } -+ tol := 1e-12 -+ return withinTolerance(m.gamma, o.gamma, tol) && 
withinTolerance(m.indexOffset, o.indexOffset, tol) -+} -+ -+func (m *CubicallyInterpolatedMapping) Index(value float64) int { -+ index := m.approximateLog(value)*m.multiplier + m.indexOffset -+ if index >= 0 { -+ return int(index) -+ } else { -+ return int(index) - 1 -+ } -+} -+ -+func (m *CubicallyInterpolatedMapping) Value(index int) float64 { -+ return m.LowerBound(index) * (1 + m.RelativeAccuracy()) -+} -+ -+func (m *CubicallyInterpolatedMapping) LowerBound(index int) float64 { -+ return m.approximateInverseLog((float64(index) - m.indexOffset) / m.multiplier) -+} -+ -+// Return an approximation of Math.log(x) / Math.log(base(2)). -+func (m *CubicallyInterpolatedMapping) approximateLog(x float64) float64 { -+ bits := math.Float64bits(x) -+ e := getExponent(bits) -+ s := getSignificandPlusOne(bits) - 1 -+ return ((A*s+B)*s+C)*s + e -+} -+ -+// The exact inverse of approximateLog. -+func (m *CubicallyInterpolatedMapping) approximateInverseLog(x float64) float64 { -+ exponent := math.Floor(x) -+ // Derived from Cardano's formula -+ d0 := B*B - 3*A*C -+ d1 := 2*B*B*B - 9*A*B*C - 27*A*A*(x-exponent) -+ p := math.Cbrt((d1 - math.Sqrt(d1*d1-4*d0*d0*d0)) / 2) -+ significandPlusOne := -(B+p+d0/p)/(3*A) + 1 -+ return buildFloat64(int(exponent), significandPlusOne) -+} -+ -+func (m *CubicallyInterpolatedMapping) MinIndexableValue() float64 { -+ return m.minIndexableValue -+} -+ -+func (m *CubicallyInterpolatedMapping) MaxIndexableValue() float64 { -+ return m.maxIndexableValue -+} -+ -+func (m *CubicallyInterpolatedMapping) RelativeAccuracy() float64 { -+ return 1 - 2/(1+math.Exp(7.0/10*math.Log2(m.gamma))) -+} -+ -+func (m *CubicallyInterpolatedMapping) ToProto() *sketchpb.IndexMapping { -+ return &sketchpb.IndexMapping{ -+ Gamma: m.gamma, -+ IndexOffset: m.indexOffset, -+ Interpolation: sketchpb.IndexMapping_CUBIC, -+ } -+} -+ -+func (m *CubicallyInterpolatedMapping) Encode(b *[]byte) { -+ enc.EncodeFlag(b, enc.FlagIndexMappingBaseCubic) -+ enc.EncodeFloat64LE(b, m.gamma) -+ enc.EncodeFloat64LE(b, m.indexOffset) -+} -+ -+func (m *CubicallyInterpolatedMapping) string() string { -+ var buffer bytes.Buffer -+ buffer.WriteString(fmt.Sprintf(""gamma: %v, indexOffset: %v\n"", m.gamma, m.indexOffset)) -+ return buffer.String() -+} -+ -+var _ IndexMapping = (*CubicallyInterpolatedMapping)(nil) -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go -new file mode 100644 -index 0000000000000..f90108eb01f81 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/index_mapping.go -@@ -0,0 +1,92 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package mapping -+ -+import ( -+ ""errors"" -+ ""fmt"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+) -+ -+const ( -+ expOverflow = 7.094361393031e+02 // The value at which math.Exp overflows -+ minNormalFloat64 = 2.2250738585072014e-308 //2^(-1022) -+) -+ -+type IndexMapping interface { -+ Equals(other IndexMapping) bool -+ Index(value float64) int -+ Value(index int) float64 -+ LowerBound(index int) float64 -+ RelativeAccuracy() float64 -+ // MinIndexableValue returns the minimum positive value that can be mapped to an index. 
-+ MinIndexableValue() float64 -+ // MaxIndexableValue returns the maximum positive value that can be mapped to an index. -+ MaxIndexableValue() float64 -+ ToProto() *sketchpb.IndexMapping -+ // Encode encodes a mapping and appends its content to the provided []byte. -+ Encode(b *[]byte) -+} -+ -+func NewDefaultMapping(relativeAccuracy float64) (IndexMapping, error) { -+ return NewLogarithmicMapping(relativeAccuracy) -+} -+ -+// FromProto returns an Index mapping from the protobuf definition of it -+func FromProto(m *sketchpb.IndexMapping) (IndexMapping, error) { -+ switch m.Interpolation { -+ case sketchpb.IndexMapping_NONE: -+ return NewLogarithmicMappingWithGamma(m.Gamma, m.IndexOffset) -+ case sketchpb.IndexMapping_LINEAR: -+ return NewLinearlyInterpolatedMappingWithGamma(m.Gamma, m.IndexOffset) -+ case sketchpb.IndexMapping_CUBIC: -+ return NewCubicallyInterpolatedMappingWithGamma(m.Gamma, m.IndexOffset) -+ default: -+ return nil, fmt.Errorf(""interpolation not supported: %d"", m.Interpolation) -+ } -+} -+ -+// Decode decodes a mapping and updates the provided []byte so that it starts -+// immediately after the encoded mapping. -+func Decode(b *[]byte, flag enc.Flag) (IndexMapping, error) { -+ switch flag { -+ -+ case enc.FlagIndexMappingBaseLogarithmic: -+ gamma, indexOffset, err := decodeLogLikeIndexMapping(b) -+ if err != nil { -+ return nil, err -+ } -+ return NewLogarithmicMappingWithGamma(gamma, indexOffset) -+ -+ case enc.FlagIndexMappingBaseLinear: -+ gamma, indexOffset, err := decodeLogLikeIndexMapping(b) -+ if err != nil { -+ return nil, err -+ } -+ return NewLinearlyInterpolatedMappingWithGamma(gamma, indexOffset) -+ -+ case enc.FlagIndexMappingBaseCubic: -+ gamma, indexOffset, err := decodeLogLikeIndexMapping(b) -+ if err != nil { -+ return nil, err -+ } -+ return NewCubicallyInterpolatedMappingWithGamma(gamma, indexOffset) -+ -+ default: -+ return nil, errors.New(""unknown mapping"") -+ } -+} -+ -+func decodeLogLikeIndexMapping(b *[]byte) (gamma, indexOffset float64, err error) { -+ gamma, err = enc.DecodeFloat64LE(b) -+ if err != nil { -+ return -+ } -+ indexOffset, err = enc.DecodeFloat64LE(b) -+ return -+} -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go -new file mode 100644 -index 0000000000000..d9b0b7408a265 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/linearly_interpolated_mapping.go -@@ -0,0 +1,142 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package mapping -+ -+import ( -+ ""bytes"" -+ ""errors"" -+ ""fmt"" -+ ""math"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+) -+ -+// LinearlyInterpolatedMapping is a fast IndexMapping that approximates the -+// memory-optimal LogarithmicMapping by extracting the floor value of the -+// logarithm to the base 2 from the binary representations of floating-point -+// values and linearly interpolating the logarithm in-between. 
-+type LinearlyInterpolatedMapping struct { -+ gamma float64 // base -+ indexOffset float64 -+ multiplier float64 // precomputed for performance -+ minIndexableValue float64 -+ maxIndexableValue float64 -+} -+ -+func NewLinearlyInterpolatedMapping(relativeAccuracy float64) (*LinearlyInterpolatedMapping, error) { -+ if relativeAccuracy <= 0 || relativeAccuracy >= 1 { -+ return nil, errors.New(""The relative accuracy must be between 0 and 1."") -+ } -+ gamma := math.Pow((1+relativeAccuracy)/(1-relativeAccuracy), math.Ln2) // > 1 -+ indexOffset := 1 / math.Log2(gamma) // for backward compatibility -+ m, _ := NewLinearlyInterpolatedMappingWithGamma(gamma, indexOffset) -+ return m, nil -+} -+ -+func NewLinearlyInterpolatedMappingWithGamma(gamma, indexOffset float64) (*LinearlyInterpolatedMapping, error) { -+ if gamma <= 1 { -+ return nil, errors.New(""Gamma must be greater than 1."") -+ } -+ multiplier := 1 / math.Log2(gamma) -+ adjustedGamma := math.Pow(gamma, 1/math.Ln2) -+ m := LinearlyInterpolatedMapping{ -+ gamma: gamma, -+ indexOffset: indexOffset, -+ multiplier: multiplier, -+ minIndexableValue: math.Max( -+ math.Exp2((math.MinInt32-indexOffset)/multiplier+1), // so that index >= MinInt32 -+ minNormalFloat64*adjustedGamma, -+ ), -+ maxIndexableValue: math.Min( -+ math.Exp2((math.MaxInt32-indexOffset)/multiplier-1), // so that index <= MaxInt32 -+ math.Exp(expOverflow)/(2*adjustedGamma)*(adjustedGamma+1), // so that math.Exp does not overflow -+ ), -+ } -+ return &m, nil -+} -+ -+func (m *LinearlyInterpolatedMapping) Equals(other IndexMapping) bool { -+ o, ok := other.(*LinearlyInterpolatedMapping) -+ if !ok { -+ return false -+ } -+ tol := 1e-12 -+ return withinTolerance(m.gamma, o.gamma, tol) && withinTolerance(m.indexOffset, o.indexOffset, tol) -+} -+ -+func (m *LinearlyInterpolatedMapping) Index(value float64) int { -+ index := m.approximateLog(value)*m.multiplier + m.indexOffset -+ if index >= 0 { -+ return int(index) -+ } else { -+ return int(index) - 1 -+ } -+} -+ -+func (m *LinearlyInterpolatedMapping) Value(index int) float64 { -+ return m.LowerBound(index) * (1 + m.RelativeAccuracy()) -+} -+ -+func (m *LinearlyInterpolatedMapping) LowerBound(index int) float64 { -+ return m.approximateInverseLog((float64(index) - m.indexOffset) / m.multiplier) -+} -+ -+// Return an approximation of Math.log(x) / Math.log(2) -+func (m *LinearlyInterpolatedMapping) approximateLog(x float64) float64 { -+ bits := math.Float64bits(x) -+ return getExponent(bits) + getSignificandPlusOne(bits) - 1 -+} -+ -+// The exact inverse of approximateLog. -+func (m *LinearlyInterpolatedMapping) approximateInverseLog(x float64) float64 { -+ exponent := math.Floor(x) -+ significandPlusOne := x - exponent + 1 -+ return buildFloat64(int(exponent), significandPlusOne) -+} -+ -+func (m *LinearlyInterpolatedMapping) MinIndexableValue() float64 { -+ return m.minIndexableValue -+} -+ -+func (m *LinearlyInterpolatedMapping) MaxIndexableValue() float64 { -+ return m.maxIndexableValue -+} -+ -+func (m *LinearlyInterpolatedMapping) RelativeAccuracy() float64 { -+ return 1 - 2/(1+math.Exp(math.Log2(m.gamma))) -+} -+ -+// Generates a protobuf representation of this LinearlyInterpolatedMapping. 
-+func (m *LinearlyInterpolatedMapping) ToProto() *sketchpb.IndexMapping { -+ return &sketchpb.IndexMapping{ -+ Gamma: m.gamma, -+ IndexOffset: m.indexOffset, -+ Interpolation: sketchpb.IndexMapping_LINEAR, -+ } -+} -+ -+func (m *LinearlyInterpolatedMapping) Encode(b *[]byte) { -+ enc.EncodeFlag(b, enc.FlagIndexMappingBaseLinear) -+ enc.EncodeFloat64LE(b, m.gamma) -+ enc.EncodeFloat64LE(b, m.indexOffset) -+} -+ -+func (m *LinearlyInterpolatedMapping) string() string { -+ var buffer bytes.Buffer -+ buffer.WriteString(fmt.Sprintf(""gamma: %v, indexOffset: %v\n"", m.gamma, m.indexOffset)) -+ return buffer.String() -+} -+ -+func withinTolerance(x, y, tolerance float64) bool { -+ if x == 0 || y == 0 { -+ return math.Abs(x) <= tolerance && math.Abs(y) <= tolerance -+ } else { -+ return math.Abs(x-y) <= tolerance*math.Max(math.Abs(x), math.Abs(y)) -+ } -+} -+ -+var _ IndexMapping = (*LinearlyInterpolatedMapping)(nil) -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go -new file mode 100644 -index 0000000000000..474e74d932001 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/mapping/logarithmic_mapping.go -@@ -0,0 +1,119 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package mapping -+ -+import ( -+ ""bytes"" -+ ""errors"" -+ ""fmt"" -+ ""math"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+) -+ -+// LogarithmicMapping is an IndexMapping that is memory-optimal, that is to say -+// that given a targeted relative accuracy, it requires the least number of -+// indices to cover a given range of values. This is done by logarithmically -+// mapping floating-point values to integers. 
-+type LogarithmicMapping struct { -+ gamma float64 // base -+ indexOffset float64 -+ multiplier float64 // precomputed for performance -+ minIndexableValue float64 -+ maxIndexableValue float64 -+} -+ -+func NewLogarithmicMapping(relativeAccuracy float64) (*LogarithmicMapping, error) { -+ if relativeAccuracy <= 0 || relativeAccuracy >= 1 { -+ return nil, errors.New(""The relative accuracy must be between 0 and 1."") -+ } -+ gamma := (1 + relativeAccuracy) / (1 - relativeAccuracy) // > 1 -+ m, _ := NewLogarithmicMappingWithGamma(gamma, 0) -+ return m, nil -+} -+ -+func NewLogarithmicMappingWithGamma(gamma, indexOffset float64) (*LogarithmicMapping, error) { -+ if gamma <= 1 { -+ return nil, errors.New(""Gamma must be greater than 1."") -+ } -+ multiplier := 1 / math.Log(gamma) -+ m := &LogarithmicMapping{ -+ gamma: gamma, -+ indexOffset: indexOffset, -+ multiplier: multiplier, -+ minIndexableValue: math.Max( -+ math.Exp((math.MinInt32-indexOffset)/multiplier+1), // so that index >= MinInt32 -+ minNormalFloat64*gamma, -+ ), -+ maxIndexableValue: math.Min( -+ math.Exp((math.MaxInt32-indexOffset)/multiplier-1), // so that index <= MaxInt32 -+ math.Exp(expOverflow)/(2*gamma)*(gamma+1), // so that math.Exp does not overflow -+ ), -+ } -+ return m, nil -+} -+ -+func (m *LogarithmicMapping) Equals(other IndexMapping) bool { -+ o, ok := other.(*LogarithmicMapping) -+ if !ok { -+ return false -+ } -+ tol := 1e-12 -+ return withinTolerance(m.gamma, o.gamma, tol) && withinTolerance(m.indexOffset, o.indexOffset, tol) -+} -+ -+func (m *LogarithmicMapping) Index(value float64) int { -+ index := math.Log(value)*m.multiplier + m.indexOffset -+ if index >= 0 { -+ return int(index) -+ } else { -+ return int(index) - 1 // faster than Math.Floor -+ } -+} -+ -+func (m *LogarithmicMapping) Value(index int) float64 { -+ return m.LowerBound(index) * (1 + m.RelativeAccuracy()) -+} -+ -+func (m *LogarithmicMapping) LowerBound(index int) float64 { -+ return math.Exp((float64(index) - m.indexOffset) / m.multiplier) -+} -+ -+func (m *LogarithmicMapping) MinIndexableValue() float64 { -+ return m.minIndexableValue -+} -+ -+func (m *LogarithmicMapping) MaxIndexableValue() float64 { -+ return m.maxIndexableValue -+} -+ -+func (m *LogarithmicMapping) RelativeAccuracy() float64 { -+ return 1 - 2/(1+m.gamma) -+} -+ -+// Generates a protobuf representation of this LogarithicMapping. -+func (m *LogarithmicMapping) ToProto() *sketchpb.IndexMapping { -+ return &sketchpb.IndexMapping{ -+ Gamma: m.gamma, -+ IndexOffset: m.indexOffset, -+ Interpolation: sketchpb.IndexMapping_NONE, -+ } -+} -+ -+func (m *LogarithmicMapping) Encode(b *[]byte) { -+ enc.EncodeFlag(b, enc.FlagIndexMappingBaseLogarithmic) -+ enc.EncodeFloat64LE(b, m.gamma) -+ enc.EncodeFloat64LE(b, m.indexOffset) -+} -+ -+func (m *LogarithmicMapping) string() string { -+ var buffer bytes.Buffer -+ buffer.WriteString(fmt.Sprintf(""gamma: %v, indexOffset: %v\n"", m.gamma, m.indexOffset)) -+ return buffer.String() -+} -+ -+var _ IndexMapping = (*LogarithmicMapping)(nil) -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go -new file mode 100644 -index 0000000000000..9dbd6f2930c2f ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/pb/sketchpb/ddsketch.pb.go -@@ -0,0 +1,448 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License 2.0. 
-+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+// Code generated by protoc-gen-go. DO NOT EDIT. -+// versions: -+// protoc-gen-go v1.28.0 -+// protoc v3.19.4 -+// source: ddsketch.proto -+ -+package sketchpb -+ -+import ( -+ protoreflect ""google.golang.org/protobuf/reflect/protoreflect"" -+ protoimpl ""google.golang.org/protobuf/runtime/protoimpl"" -+ reflect ""reflect"" -+ sync ""sync"" -+) -+ -+const ( -+ // Verify that this generated code is sufficiently up-to-date. -+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) -+ // Verify that runtime/protoimpl is sufficiently up-to-date. -+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -+) -+ -+type IndexMapping_Interpolation int32 -+ -+const ( -+ IndexMapping_NONE IndexMapping_Interpolation = 0 -+ IndexMapping_LINEAR IndexMapping_Interpolation = 1 -+ IndexMapping_QUADRATIC IndexMapping_Interpolation = 2 -+ IndexMapping_CUBIC IndexMapping_Interpolation = 3 -+) -+ -+// Enum value maps for IndexMapping_Interpolation. -+var ( -+ IndexMapping_Interpolation_name = map[int32]string{ -+ 0: ""NONE"", -+ 1: ""LINEAR"", -+ 2: ""QUADRATIC"", -+ 3: ""CUBIC"", -+ } -+ IndexMapping_Interpolation_value = map[string]int32{ -+ ""NONE"": 0, -+ ""LINEAR"": 1, -+ ""QUADRATIC"": 2, -+ ""CUBIC"": 3, -+ } -+) -+ -+func (x IndexMapping_Interpolation) Enum() *IndexMapping_Interpolation { -+ p := new(IndexMapping_Interpolation) -+ *p = x -+ return p -+} -+ -+func (x IndexMapping_Interpolation) String() string { -+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -+} -+ -+func (IndexMapping_Interpolation) Descriptor() protoreflect.EnumDescriptor { -+ return file_ddsketch_proto_enumTypes[0].Descriptor() -+} -+ -+func (IndexMapping_Interpolation) Type() protoreflect.EnumType { -+ return &file_ddsketch_proto_enumTypes[0] -+} -+ -+func (x IndexMapping_Interpolation) Number() protoreflect.EnumNumber { -+ return protoreflect.EnumNumber(x) -+} -+ -+// Deprecated: Use IndexMapping_Interpolation.Descriptor instead. -+func (IndexMapping_Interpolation) EnumDescriptor() ([]byte, []int) { -+ return file_ddsketch_proto_rawDescGZIP(), []int{1, 0} -+} -+ -+// A DDSketch is essentially a histogram that partitions the range of positive values into an infinite number of -+// indexed bins whose size grows exponentially. It keeps track of the number of values (or possibly floating-point -+// weights) added to each bin. Negative values are partitioned like positive values, symmetrically to zero. -+// The value zero as well as its close neighborhood that would be mapped to extreme bin indexes is mapped to a specific -+// counter. -+type DDSketch struct { -+ state protoimpl.MessageState -+ sizeCache protoimpl.SizeCache -+ unknownFields protoimpl.UnknownFields -+ -+ // The mapping between positive values and the bin indexes they belong to. -+ Mapping *IndexMapping `protobuf:""bytes,1,opt,name=mapping,proto3"" json:""mapping,omitempty""` -+ // The store for keeping track of positive values. -+ PositiveValues *Store `protobuf:""bytes,2,opt,name=positiveValues,proto3"" json:""positiveValues,omitempty""` -+ // The store for keeping track of negative values. A negative value v is mapped using its positive opposite -v. -+ NegativeValues *Store `protobuf:""bytes,3,opt,name=negativeValues,proto3"" json:""negativeValues,omitempty""` -+ // The count for the value zero and its close neighborhood (whose width depends on the mapping). 
-+ ZeroCount float64 `protobuf:""fixed64,4,opt,name=zeroCount,proto3"" json:""zeroCount,omitempty""` -+} -+ -+func (x *DDSketch) Reset() { -+ *x = DDSketch{} -+ if protoimpl.UnsafeEnabled { -+ mi := &file_ddsketch_proto_msgTypes[0] -+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) -+ ms.StoreMessageInfo(mi) -+ } -+} -+ -+func (x *DDSketch) String() string { -+ return protoimpl.X.MessageStringOf(x) -+} -+ -+func (*DDSketch) ProtoMessage() {} -+ -+func (x *DDSketch) ProtoReflect() protoreflect.Message { -+ mi := &file_ddsketch_proto_msgTypes[0] -+ if protoimpl.UnsafeEnabled && x != nil { -+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) -+ if ms.LoadMessageInfo() == nil { -+ ms.StoreMessageInfo(mi) -+ } -+ return ms -+ } -+ return mi.MessageOf(x) -+} -+ -+// Deprecated: Use DDSketch.ProtoReflect.Descriptor instead. -+func (*DDSketch) Descriptor() ([]byte, []int) { -+ return file_ddsketch_proto_rawDescGZIP(), []int{0} -+} -+ -+func (x *DDSketch) GetMapping() *IndexMapping { -+ if x != nil { -+ return x.Mapping -+ } -+ return nil -+} -+ -+func (x *DDSketch) GetPositiveValues() *Store { -+ if x != nil { -+ return x.PositiveValues -+ } -+ return nil -+} -+ -+func (x *DDSketch) GetNegativeValues() *Store { -+ if x != nil { -+ return x.NegativeValues -+ } -+ return nil -+} -+ -+func (x *DDSketch) GetZeroCount() float64 { -+ if x != nil { -+ return x.ZeroCount -+ } -+ return 0 -+} -+ -+// How to map positive values to the bins they belong to. -+type IndexMapping struct { -+ state protoimpl.MessageState -+ sizeCache protoimpl.SizeCache -+ unknownFields protoimpl.UnknownFields -+ -+ // The gamma parameter of the mapping, such that bin index that a value v belongs to is roughly equal to -+ // log(v)/log(gamma). -+ Gamma float64 `protobuf:""fixed64,1,opt,name=gamma,proto3"" json:""gamma,omitempty""` -+ // An offset that can be used to shift all bin indexes. -+ IndexOffset float64 `protobuf:""fixed64,2,opt,name=indexOffset,proto3"" json:""indexOffset,omitempty""` -+ // To speed up the computation of the index a value belongs to, the computation of the log may be approximated using -+ // the fact that the log to the base 2 of powers of 2 can be computed at a low cost from the binary representation of -+ // the input value. Other values can be approximated by interpolating between successive powers of 2 (linearly, -+ // quadratically or cubically). -+ // NONE means that the log is to be computed exactly (no interpolation). -+ Interpolation IndexMapping_Interpolation `protobuf:""varint,3,opt,name=interpolation,proto3,enum=IndexMapping_Interpolation"" json:""interpolation,omitempty""` -+} -+ -+func (x *IndexMapping) Reset() { -+ *x = IndexMapping{} -+ if protoimpl.UnsafeEnabled { -+ mi := &file_ddsketch_proto_msgTypes[1] -+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) -+ ms.StoreMessageInfo(mi) -+ } -+} -+ -+func (x *IndexMapping) String() string { -+ return protoimpl.X.MessageStringOf(x) -+} -+ -+func (*IndexMapping) ProtoMessage() {} -+ -+func (x *IndexMapping) ProtoReflect() protoreflect.Message { -+ mi := &file_ddsketch_proto_msgTypes[1] -+ if protoimpl.UnsafeEnabled && x != nil { -+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) -+ if ms.LoadMessageInfo() == nil { -+ ms.StoreMessageInfo(mi) -+ } -+ return ms -+ } -+ return mi.MessageOf(x) -+} -+ -+// Deprecated: Use IndexMapping.ProtoReflect.Descriptor instead. 
-+func (*IndexMapping) Descriptor() ([]byte, []int) { -+ return file_ddsketch_proto_rawDescGZIP(), []int{1} -+} -+ -+func (x *IndexMapping) GetGamma() float64 { -+ if x != nil { -+ return x.Gamma -+ } -+ return 0 -+} -+ -+func (x *IndexMapping) GetIndexOffset() float64 { -+ if x != nil { -+ return x.IndexOffset -+ } -+ return 0 -+} -+ -+func (x *IndexMapping) GetInterpolation() IndexMapping_Interpolation { -+ if x != nil { -+ return x.Interpolation -+ } -+ return IndexMapping_NONE -+} -+ -+// A Store maps bin indexes to their respective counts. -+// Counts can be encoded sparsely using binCounts, but also in a contiguous way using contiguousBinCounts and -+// contiguousBinIndexOffset. Given that non-empty bins are in practice usually contiguous or close to one another, the -+// latter contiguous encoding method is usually more efficient than the sparse one. -+// Both encoding methods can be used conjointly. If a bin appears in both the sparse and the contiguous encodings, its -+// count value is the sum of the counts in each encodings. -+type Store struct { -+ state protoimpl.MessageState -+ sizeCache protoimpl.SizeCache -+ unknownFields protoimpl.UnknownFields -+ -+ // The bin counts, encoded sparsely. -+ BinCounts map[int32]float64 `protobuf:""bytes,1,rep,name=binCounts,proto3"" json:""binCounts,omitempty"" protobuf_key:""zigzag32,1,opt,name=key,proto3"" protobuf_val:""fixed64,2,opt,name=value,proto3""` -+ // The bin counts, encoded contiguously. The values of contiguousBinCounts are the counts for the bins of indexes -+ // o, o+1, o+2, etc., where o is contiguousBinIndexOffset. -+ ContiguousBinCounts []float64 `protobuf:""fixed64,2,rep,packed,name=contiguousBinCounts,proto3"" json:""contiguousBinCounts,omitempty""` -+ ContiguousBinIndexOffset int32 `protobuf:""zigzag32,3,opt,name=contiguousBinIndexOffset,proto3"" json:""contiguousBinIndexOffset,omitempty""` -+} -+ -+func (x *Store) Reset() { -+ *x = Store{} -+ if protoimpl.UnsafeEnabled { -+ mi := &file_ddsketch_proto_msgTypes[2] -+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) -+ ms.StoreMessageInfo(mi) -+ } -+} -+ -+func (x *Store) String() string { -+ return protoimpl.X.MessageStringOf(x) -+} -+ -+func (*Store) ProtoMessage() {} -+ -+func (x *Store) ProtoReflect() protoreflect.Message { -+ mi := &file_ddsketch_proto_msgTypes[2] -+ if protoimpl.UnsafeEnabled && x != nil { -+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) -+ if ms.LoadMessageInfo() == nil { -+ ms.StoreMessageInfo(mi) -+ } -+ return ms -+ } -+ return mi.MessageOf(x) -+} -+ -+// Deprecated: Use Store.ProtoReflect.Descriptor instead. 
-+func (*Store) Descriptor() ([]byte, []int) { -+ return file_ddsketch_proto_rawDescGZIP(), []int{2} -+} -+ -+func (x *Store) GetBinCounts() map[int32]float64 { -+ if x != nil { -+ return x.BinCounts -+ } -+ return nil -+} -+ -+func (x *Store) GetContiguousBinCounts() []float64 { -+ if x != nil { -+ return x.ContiguousBinCounts -+ } -+ return nil -+} -+ -+func (x *Store) GetContiguousBinIndexOffset() int32 { -+ if x != nil { -+ return x.ContiguousBinIndexOffset -+ } -+ return 0 -+} -+ -+var File_ddsketch_proto protoreflect.FileDescriptor -+ -+var file_ddsketch_proto_rawDesc = []byte{ -+ 0x0a, 0x0e, 0x64, 0x64, 0x73, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, -+ 0x22, 0xb1, 0x01, 0x0a, 0x08, 0x44, 0x44, 0x53, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x12, 0x27, 0x0a, -+ 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, -+ 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, -+ 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, -+ 0x76, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06, -+ 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, -+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, -+ 0x76, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06, -+ 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, -+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, -+ 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, 0x43, -+ 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xca, 0x01, 0x0a, 0x0c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4d, 0x61, -+ 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x61, 0x6d, 0x6d, 0x61, 0x18, 0x01, -+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x67, 0x61, 0x6d, 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x69, -+ 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, -+ 0x52, 0x0b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x41, 0x0a, -+ 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, -+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4d, 0x61, 0x70, 0x70, -+ 0x69, 0x6e, 0x67, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, -+ 0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, -+ 0x22, 0x3f, 0x0a, 0x0d, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, -+ 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4c, -+ 0x49, 0x4e, 0x45, 0x41, 0x52, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x51, 0x55, 0x41, 0x44, 0x52, -+ 0x41, 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x55, 0x42, 0x49, 0x43, 0x10, -+ 0x03, 0x22, 0xec, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x33, 0x0a, 0x09, 0x62, -+ 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, -+ 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, -+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, -+ 0x12, 0x34, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67, 0x75, 0x6f, 0x75, 0x73, 0x42, 
0x69, -+ 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x01, 0x42, 0x02, 0x10, -+ 0x01, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67, 0x75, 0x6f, 0x75, 0x73, 0x42, 0x69, 0x6e, -+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67, -+ 0x75, 0x6f, 0x75, 0x73, 0x42, 0x69, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73, -+ 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x18, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x67, -+ 0x75, 0x6f, 0x75, 0x73, 0x42, 0x69, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x66, 0x66, 0x73, -+ 0x65, 0x74, 0x1a, 0x3c, 0x0a, 0x0e, 0x42, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, -+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, -+ 0x11, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, -+ 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, -+ 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, -+ 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x73, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x65, 0x73, 0x2d, -+ 0x67, 0x6f, 0x2f, 0x64, 0x64, 0x73, 0x6b, 0x65, 0x74, 0x63, 0x68, 0x2f, 0x70, 0x62, 0x2f, 0x73, -+ 0x6b, 0x65, 0x74, 0x63, 0x68, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -+} -+ -+var ( -+ file_ddsketch_proto_rawDescOnce sync.Once -+ file_ddsketch_proto_rawDescData = file_ddsketch_proto_rawDesc -+) -+ -+func file_ddsketch_proto_rawDescGZIP() []byte { -+ file_ddsketch_proto_rawDescOnce.Do(func() { -+ file_ddsketch_proto_rawDescData = protoimpl.X.CompressGZIP(file_ddsketch_proto_rawDescData) -+ }) -+ return file_ddsketch_proto_rawDescData -+} -+ -+var file_ddsketch_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -+var file_ddsketch_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -+var file_ddsketch_proto_goTypes = []interface{}{ -+ (IndexMapping_Interpolation)(0), // 0: IndexMapping.Interpolation -+ (*DDSketch)(nil), // 1: DDSketch -+ (*IndexMapping)(nil), // 2: IndexMapping -+ (*Store)(nil), // 3: Store -+ nil, // 4: Store.BinCountsEntry -+} -+var file_ddsketch_proto_depIdxs = []int32{ -+ 2, // 0: DDSketch.mapping:type_name -> IndexMapping -+ 3, // 1: DDSketch.positiveValues:type_name -> Store -+ 3, // 2: DDSketch.negativeValues:type_name -> Store -+ 0, // 3: IndexMapping.interpolation:type_name -> IndexMapping.Interpolation -+ 4, // 4: Store.binCounts:type_name -> Store.BinCountsEntry -+ 5, // [5:5] is the sub-list for method output_type -+ 5, // [5:5] is the sub-list for method input_type -+ 5, // [5:5] is the sub-list for extension type_name -+ 5, // [5:5] is the sub-list for extension extendee -+ 0, // [0:5] is the sub-list for field type_name -+} -+ -+func init() { file_ddsketch_proto_init() } -+func file_ddsketch_proto_init() { -+ if File_ddsketch_proto != nil { -+ return -+ } -+ if !protoimpl.UnsafeEnabled { -+ file_ddsketch_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { -+ switch v := v.(*DDSketch); i { -+ case 0: -+ return &v.state -+ case 1: -+ return &v.sizeCache -+ case 2: -+ return &v.unknownFields -+ default: -+ return nil -+ } -+ } -+ file_ddsketch_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { -+ switch v := v.(*IndexMapping); i { -+ case 0: -+ return &v.state -+ case 1: -+ return &v.sizeCache -+ case 2: -+ return &v.unknownFields -+ default: -+ return nil -+ } -+ } -+ file_ddsketch_proto_msgTypes[2].Exporter = func(v interface{}, i 
int) interface{} { -+ switch v := v.(*Store); i { -+ case 0: -+ return &v.state -+ case 1: -+ return &v.sizeCache -+ case 2: -+ return &v.unknownFields -+ default: -+ return nil -+ } -+ } -+ } -+ type x struct{} -+ out := protoimpl.TypeBuilder{ -+ File: protoimpl.DescBuilder{ -+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), -+ RawDescriptor: file_ddsketch_proto_rawDesc, -+ NumEnums: 1, -+ NumMessages: 4, -+ NumExtensions: 0, -+ NumServices: 0, -+ }, -+ GoTypes: file_ddsketch_proto_goTypes, -+ DependencyIndexes: file_ddsketch_proto_depIdxs, -+ EnumInfos: file_ddsketch_proto_enumTypes, -+ MessageInfos: file_ddsketch_proto_msgTypes, -+ }.Build() -+ File_ddsketch_proto = out.File -+ file_ddsketch_proto_rawDesc = nil -+ file_ddsketch_proto_goTypes = nil -+ file_ddsketch_proto_depIdxs = nil -+} -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/stat/summary.go b/vendor/github.com/DataDog/sketches-go/ddsketch/stat/summary.go -new file mode 100644 -index 0000000000000..5d56f39445bb1 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/stat/summary.go -@@ -0,0 +1,171 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package stat -+ -+import ( -+ ""fmt"" -+ ""math"" -+) -+ -+// SummaryStatistics keeps track of the count, the sum, the min and the max of -+// recorded values. We use a compensated sum to avoid accumulating rounding -+// errors (see https://en.wikipedia.org/wiki/Kahan_summation_algorithm). -+type SummaryStatistics struct { -+ count float64 -+ sum float64 -+ sumCompensation float64 -+ simpleSum float64 -+ min float64 -+ max float64 -+} -+ -+func NewSummaryStatistics() *SummaryStatistics { -+ return &SummaryStatistics{ -+ count: 0, -+ sum: 0, -+ sumCompensation: 0, -+ simpleSum: 0, -+ min: math.Inf(1), -+ max: math.Inf(-1), -+ } -+} -+ -+// NewSummaryStatisticsFromData constructs SummaryStatistics from the provided data. -+func NewSummaryStatisticsFromData(count, sum, min, max float64) (*SummaryStatistics, error) { -+ if !(count >= 0) { -+ return nil, fmt.Errorf(""count (%g) must be positive or zero"", count) -+ } -+ if count > 0 && min > max { -+ return nil, fmt.Errorf(""min (%g) cannot be greater than max (%g) if count (%g) is positive"", min, max, count) -+ } -+ if count == 0 && (min != math.Inf(1) || max != math.Inf(-1)) { -+ return nil, fmt.Errorf(""empty summary statistics must have min (%g) and max (%g) equal to positive and negative infinities respectively"", min, max) -+ } -+ return &SummaryStatistics{ -+ count: count, -+ sum: sum, -+ sumCompensation: 0, -+ simpleSum: sum, -+ min: min, -+ max: max, -+ }, nil -+} -+ -+func (s *SummaryStatistics) Count() float64 { -+ return s.count -+} -+ -+func (s *SummaryStatistics) Sum() float64 { -+ // Better error bounds to add both terms as the final sum -+ tmp := s.sum + s.sumCompensation -+ if math.IsNaN(tmp) && math.IsInf(s.simpleSum, 0) { -+ // If the compensated sum is spuriously NaN from accumulating one or more same-signed infinite -+ // values, return the correctly-signed infinity stored in simpleSum. 
-+ return s.simpleSum -+ } else { -+ return tmp -+ } -+} -+ -+func (s *SummaryStatistics) Min() float64 { -+ return s.min -+} -+ -+func (s *SummaryStatistics) Max() float64 { -+ return s.max -+} -+ -+func (s *SummaryStatistics) Add(value, count float64) { -+ s.AddToCount(count) -+ s.AddToSum(value * count) -+ if value < s.min { -+ s.min = value -+ } -+ if value > s.max { -+ s.max = value -+ } -+} -+ -+func (s *SummaryStatistics) AddToCount(addend float64) { -+ s.count += addend -+} -+ -+func (s *SummaryStatistics) AddToSum(addend float64) { -+ s.sumWithCompensation(addend) -+ s.simpleSum += addend -+} -+ -+func (s *SummaryStatistics) MergeWith(o *SummaryStatistics) { -+ s.count += o.count -+ s.sumWithCompensation(o.sum) -+ s.sumWithCompensation(o.sumCompensation) -+ s.simpleSum += o.simpleSum -+ if o.min < s.min { -+ s.min = o.min -+ } -+ if o.max > s.max { -+ s.max = o.max -+ } -+} -+ -+func (s *SummaryStatistics) sumWithCompensation(value float64) { -+ tmp := value - s.sumCompensation -+ velvel := s.sum + tmp // little wolf of rounding error -+ s.sumCompensation = velvel - s.sum - tmp -+ s.sum = velvel -+} -+ -+// Reweight adjusts the statistics so that they are equal to what they would -+// have been if AddWithCount had been called with counts multiplied by factor. -+func (s *SummaryStatistics) Reweight(factor float64) { -+ s.count *= factor -+ s.sum *= factor -+ s.sumCompensation *= factor -+ s.simpleSum *= factor -+ if factor == 0 { -+ s.min = math.Inf(1) -+ s.max = math.Inf(-1) -+ } -+} -+ -+// Rescale adjusts the statistics so that they are equal to what they would have -+// been if AddWithCount had been called with values multiplied by factor. -+func (s *SummaryStatistics) Rescale(factor float64) { -+ s.sum *= factor -+ s.sumCompensation *= factor -+ s.simpleSum *= factor -+ if factor > 0 { -+ s.min *= factor -+ s.max *= factor -+ } else if factor < 0 { -+ tmp := s.max * factor -+ s.max = s.min * factor -+ s.min = tmp -+ } else if s.count != 0 { -+ s.min = 0 -+ s.max = 0 -+ } -+} -+ -+func (s *SummaryStatistics) Clear() { -+ s.count = 0 -+ s.sum = 0 -+ s.sumCompensation = 0 -+ s.simpleSum = 0 -+ s.min = math.Inf(1) -+ s.max = math.Inf(-1) -+} -+ -+func (s *SummaryStatistics) Copy() *SummaryStatistics { -+ return &SummaryStatistics{ -+ count: s.count, -+ sum: s.sum, -+ sumCompensation: s.sumCompensation, -+ simpleSum: s.simpleSum, -+ min: s.min, -+ max: s.max, -+ } -+} -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/bin.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/bin.go -new file mode 100644 -index 0000000000000..19843ba9e1234 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/bin.go -@@ -0,0 +1,28 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. 
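// A minimal sketch of the compensated (Kahan) summation that
// SummaryStatistics.sumWithCompensation above relies on: a running
// compensation term captures the low-order bits that a plain float64 sum
// drops. The helper name kahanSum is illustrative only and is not part of
// the library.
package main

import ""fmt""

func kahanSum(values []float64) float64 {
	var sum, compensation float64
	for _, v := range values {
		tmp := v - compensation
		t := sum + tmp               // low-order bits of tmp can be lost here
		compensation = t - sum - tmp // recover the lost part for the next step
		sum = t
	}
	return sum
}

func main() {
	// Adding many small values to one large value shows the difference.
	values := []float64{1e16}
	for i := 0; i < 1000; i++ {
		values = append(values, 1)
	}

	naive := 0.0
	for _, v := range values {
		naive += v
	}

	fmt.Println(""naive sum:      "", naive)            // 1e+16 (the ones are lost)
	fmt.Println(""compensated sum:"", kahanSum(values)) // 1.0000000000001e+16
}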
-+ -+package store -+ -+import ""errors"" -+ -+type Bin struct { -+ index int -+ count float64 -+} -+ -+func NewBin(index int, count float64) (*Bin, error) { -+ if count < 0 { -+ return nil, errors.New(""The count cannot be negative"") -+ } -+ return &Bin{index: index, count: count}, nil -+} -+ -+func (b Bin) Index() int { -+ return b.index -+} -+ -+func (b Bin) Count() float64 { -+ return b.count -+} -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go -new file mode 100644 -index 0000000000000..11f43107d6e07 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/buffered_paginated.go -@@ -0,0 +1,667 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package store -+ -+import ( -+ ""errors"" -+ ""sort"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+) -+ -+const ( -+ ptrSize = 32 << (^uintptr(0) >> 63) -+ intSize = 32 << (^uint(0) >> 63) -+ float64size = 64 -+ bufferEntrySize = intSize -+ countSize = float64size -+ -+ defaultPageLenLog2 = 5 // pageLen = 32 -+) -+ -+// BufferedPaginatedStore allocates storage for counts in aligned fixed-size -+// pages, themselves stored in a dynamically-sized slice. A page encodes the -+// counts for a contiguous range of indexes, and two pages that are contiguous -+// in the slice encode ranges that are contiguous. In addition, input indexes -+// that are added to the store with a count equal to 1 can be stored in a -+// buffer. -+// The store favors using the buffer and only creates pages when the memory size -+// of the page is no greater than the memory space that is needed to keep in the -+// buffer the indexes that could otherwise be encoded in that page. That means -+// that some indexes may stay indefinitely in the buffer if, to be removed from -+// the buffer, they would create a page that is almost empty. The process that -+// transfers indexes from the buffer to pages is called compaction. -+// This store never collapses or merges bins, therefore, it does not introduce -+// any error in itself. In particular, MinIndex(), MaxIndex(), Bins() and -+// KeyAtRank() return exact results. -+// There is no upper bound on the memory size that this store needs to encode -+// input indexes, and some input data distributions may make it reach large -+// sizes. However, thanks to the buffer and the fact that only required pages -+// are allocated, it can be much more space efficient than alternative stores, -+// especially dense stores, in various situations, including when only few -+// indexes are added (with their counts equal to 1), when the input data has a -+// few outliers or when the input data distribution is multimodal. 
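// A minimal, self-contained usage sketch for the store described in the
// comment above, assuming the exported API added in this file
// (NewBufferedPaginatedStore, Add, AddWithCount, TotalCount, MinIndex,
// MaxIndex, KeyAtRank) behaves as documented. Indexes added with a count of
// one go through the buffer; larger counts are written directly into pages.
package main

import (
	""fmt""

	""github.com/DataDog/sketches-go/ddsketch/store""
)

func main() {
	s := store.NewBufferedPaginatedStore()

	// Ten single-count indexes: these can stay in the buffer.
	for index := 0; index < 10; index++ {
		s.Add(index)
	}
	// A larger count is stored in the page covering index 42.
	s.AddWithCount(42, 5)

	fmt.Println(""total count:"", s.TotalCount()) // 15
	if minIndex, err := s.MinIndex(); err == nil {
		fmt.Println(""min index:"", minIndex) // 0
	}
	if maxIndex, err := s.MaxIndex(); err == nil {
		fmt.Println(""max index:"", maxIndex) // 42
	}
	// KeyAtRank returns the smallest index whose cumulative count exceeds the rank.
	fmt.Println(""key at rank 12:"", s.KeyAtRank(12)) // 42
}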
-+type BufferedPaginatedStore struct { -+ buffer []int // FIXME: in practice, int32 (even int16, depending on the accuracy parameter) is enough -+ bufferCompactionTriggerLen int // compaction happens only after this buffer length is reached -+ -+ pages [][]float64 // len == cap, the slice is always used to its maximum capacity -+ minPageIndex int // minPageIndex == maxInt iff pages are unused (they may still be allocated) -+ pageLenLog2 int -+ pageLenMask int -+} -+ -+func NewBufferedPaginatedStore() *BufferedPaginatedStore { -+ initialBufferCapacity := 4 -+ pageLenLog2 := defaultPageLenLog2 -+ pageLen := 1 << pageLenLog2 -+ -+ return &BufferedPaginatedStore{ -+ buffer: make([]int, 0, initialBufferCapacity), -+ bufferCompactionTriggerLen: 2 * pageLen, -+ pages: nil, -+ minPageIndex: maxInt, -+ pageLenLog2: pageLenLog2, -+ pageLenMask: pageLen - 1, -+ } -+} -+ -+// pageIndex returns the page number the given index falls on. -+func (s *BufferedPaginatedStore) pageIndex(index int) int { -+ return index >> s.pageLenLog2 -+} -+ -+// lineIndex returns the line number within a page that the given index falls on. -+func (s *BufferedPaginatedStore) lineIndex(index int) int { -+ return index & s.pageLenMask -+} -+ -+// index returns the store-level index for a given page number and a line within that page. -+func (s *BufferedPaginatedStore) index(pageIndex, lineIndex int) int { -+ return pageIndex<= s.minPageIndex && pageIndex < s.minPageIndex+len(s.pages) { -+ // No need to extend s.pages. -+ page := &s.pages[pageIndex-s.minPageIndex] -+ if ensureExists && len(*page) == 0 { -+ *page = append(*page, make([]float64, pageLen)...) -+ } -+ return *page -+ } -+ -+ if !ensureExists { -+ return nil -+ } -+ -+ if pageIndex < s.minPageIndex { -+ if s.minPageIndex == maxInt { -+ if len(s.pages) == 0 { -+ s.pages = append(s.pages, make([][]float64, s.newPagesLen(1))...) -+ } -+ s.minPageIndex = pageIndex - len(s.pages)/2 -+ } else { -+ // Extends s.pages left. -+ newLen := s.newPagesLen(s.minPageIndex - pageIndex + 1 + len(s.pages)) -+ addedLen := newLen - len(s.pages) -+ s.pages = append(s.pages, make([][]float64, addedLen)...) -+ copy(s.pages[addedLen:], s.pages) -+ for i := 0; i < addedLen; i++ { -+ s.pages[i] = nil -+ } -+ s.minPageIndex -= addedLen -+ } -+ } else { -+ // Extends s.pages right. -+ s.pages = append(s.pages, make([][]float64, s.newPagesLen(pageIndex-s.minPageIndex+1)-len(s.pages))...) -+ } -+ -+ page := &s.pages[pageIndex-s.minPageIndex] -+ if len(*page) == 0 { -+ *page = append(*page, make([]float64, pageLen)...) -+ } -+ return *page -+} -+ -+func (s *BufferedPaginatedStore) newPagesLen(required int) int { -+ // Grow in size by multiples of 64 bytes -+ pageGrowthIncrement := 64 * 8 / ptrSize -+ return (required + pageGrowthIncrement - 1) & -pageGrowthIncrement -+} -+ -+// compact transfers indexes from the buffer to the pages. It only creates new -+// pages if they can encode enough buffered indexes so that it frees more space -+// in the buffer than the new page takes. -+func (s *BufferedPaginatedStore) compact() { -+ pageLen := 1 << s.pageLenLog2 -+ -+ s.sortBuffer() -+ -+ for bufferPos := 0; bufferPos < len(s.buffer); { -+ bufferPageStart := bufferPos -+ pageIndex := s.pageIndex(s.buffer[bufferPageStart]) -+ bufferPos++ -+ for bufferPos < len(s.buffer) && s.pageIndex(s.buffer[bufferPos]) == pageIndex { -+ bufferPos++ -+ } -+ bufferPageEnd := bufferPos -+ -+ // We avoid creating a new page if it would take more memory space than -+ // what we would free in the buffer. 
Note that even when the page itself -+ // takes less memory space than the buffered indexes that can be encoded -+ // in the page, because we may have to extend s.pages, the store may end -+ // up larger. However, for the sake of simplicity, we ignore the length -+ // of s.pages. -+ ensureExists := (bufferPageEnd-bufferPageStart)*bufferEntrySize >= pageLen*float64size -+ newPage := s.page(pageIndex, ensureExists) -+ if len(newPage) > 0 { -+ for _, index := range s.buffer[bufferPageStart:bufferPageEnd] { -+ newPage[s.lineIndex(index)]++ -+ } -+ copy(s.buffer[bufferPageStart:], s.buffer[bufferPageEnd:]) -+ s.buffer = s.buffer[:len(s.buffer)+bufferPageStart-bufferPageEnd] -+ bufferPos = bufferPageStart -+ } -+ } -+ -+ s.bufferCompactionTriggerLen = len(s.buffer) + pageLen -+} -+ -+func (s *BufferedPaginatedStore) sortBuffer() { -+ sort.Slice(s.buffer, func(i, j int) bool { return s.buffer[i] < s.buffer[j] }) -+} -+ -+func (s *BufferedPaginatedStore) Add(index int) { -+ pageIndex := s.pageIndex(index) -+ if pageIndex >= s.minPageIndex && pageIndex < s.minPageIndex+len(s.pages) { -+ page := s.pages[pageIndex-s.minPageIndex] -+ if len(page) > 0 { -+ page[s.lineIndex(index)]++ -+ return -+ } -+ } -+ -+ // The page does not exist, use the buffer. -+ if len(s.buffer) == cap(s.buffer) && len(s.buffer) >= s.bufferCompactionTriggerLen { -+ s.compact() -+ } -+ -+ s.buffer = append(s.buffer, index) -+} -+ -+func (s *BufferedPaginatedStore) AddBin(bin Bin) { -+ s.AddWithCount(bin.Index(), bin.Count()) -+} -+ -+func (s *BufferedPaginatedStore) AddWithCount(index int, count float64) { -+ if count == 0 { -+ return -+ } else if count == 1 { -+ s.Add(index) -+ } else { -+ s.page(s.pageIndex(index), true)[s.lineIndex(index)] += count -+ } -+} -+ -+func (s *BufferedPaginatedStore) IsEmpty() bool { -+ if len(s.buffer) > 0 { -+ return false -+ } -+ for _, page := range s.pages { -+ for _, count := range page { -+ if count > 0 { -+ return false -+ } -+ } -+ } -+ return true -+} -+ -+func (s *BufferedPaginatedStore) TotalCount() float64 { -+ totalCount := float64(len(s.buffer)) -+ for _, page := range s.pages { -+ for _, count := range page { -+ totalCount += count -+ } -+ } -+ return totalCount -+} -+ -+func (s *BufferedPaginatedStore) MinIndex() (int, error) { -+ isEmpty := true -+ -+ // Iterate over the buffer. -+ var minIndex int -+ for _, index := range s.buffer { -+ if isEmpty || index < minIndex { -+ isEmpty = false -+ minIndex = index -+ } -+ } -+ -+ // Iterate over the pages. -+ for pageIndex := s.minPageIndex; pageIndex < s.minPageIndex+len(s.pages) && (isEmpty || pageIndex <= s.pageIndex(minIndex)); pageIndex++ { -+ page := s.pages[pageIndex-s.minPageIndex] -+ if len(page) == 0 { -+ continue -+ } -+ -+ var lineIndexRangeEnd int -+ if !isEmpty && pageIndex == s.pageIndex(minIndex) { -+ lineIndexRangeEnd = s.lineIndex(minIndex) -+ } else { -+ lineIndexRangeEnd = 1 << s.pageLenLog2 -+ } -+ -+ for lineIndex := 0; lineIndex < lineIndexRangeEnd; lineIndex++ { -+ if page[lineIndex] > 0 { -+ return s.index(pageIndex, lineIndex), nil -+ } -+ } -+ } -+ -+ if isEmpty { -+ return 0, errUndefinedMinIndex -+ } else { -+ return minIndex, nil -+ } -+} -+ -+func (s *BufferedPaginatedStore) MaxIndex() (int, error) { -+ isEmpty := true -+ -+ // Iterate over the buffer. -+ var maxIndex int -+ for _, index := range s.buffer { -+ if isEmpty || index > maxIndex { -+ isEmpty = false -+ maxIndex = index -+ } -+ } -+ -+ // Iterate over the pages. 
-+ for pageIndex := s.minPageIndex + len(s.pages) - 1; pageIndex >= s.minPageIndex && (isEmpty || pageIndex >= s.pageIndex(maxIndex)); pageIndex-- { -+ page := s.pages[pageIndex-s.minPageIndex] -+ if len(page) == 0 { -+ continue -+ } -+ -+ var lineIndexRangeStart int -+ if !isEmpty && pageIndex == s.pageIndex(maxIndex) { -+ lineIndexRangeStart = s.lineIndex(maxIndex) -+ } else { -+ lineIndexRangeStart = 0 -+ } -+ -+ for lineIndex := len(page) - 1; lineIndex >= lineIndexRangeStart; lineIndex-- { -+ if page[lineIndex] > 0 { -+ return s.index(pageIndex, lineIndex), nil -+ } -+ } -+ } -+ -+ if isEmpty { -+ return 0, errUndefinedMaxIndex -+ } else { -+ return maxIndex, nil -+ } -+} -+ -+func (s *BufferedPaginatedStore) KeyAtRank(rank float64) int { -+ if rank < 0 { -+ rank = 0 -+ } -+ key, err := s.minIndexWithCumulCount(func(cumulCount float64) bool { -+ return cumulCount > rank -+ }) -+ -+ if err != nil { -+ maxIndex, err := s.MaxIndex() -+ if err == nil { -+ return maxIndex -+ } else { -+ // FIXME: make Store's KeyAtRank consistent with MinIndex and MaxIndex -+ return 0 -+ } -+ } -+ return key -+} -+ -+// minIndexWithCumulCount returns the minimum index whose cumulative count (that -+// is, the sum of the counts associated with the indexes less than or equal to -+// the index) verifies the predicate. -+func (s *BufferedPaginatedStore) minIndexWithCumulCount(predicate func(float64) bool) (int, error) { -+ s.sortBuffer() -+ cumulCount := float64(0) -+ -+ // Iterate over the pages and the buffer simultaneously. -+ bufferPos := 0 -+ for pageOffset, page := range s.pages { -+ for lineIndex, count := range page { -+ index := s.index(s.minPageIndex+pageOffset, lineIndex) -+ -+ // Iterate over the buffer until index is reached. -+ for ; bufferPos < len(s.buffer) && s.buffer[bufferPos] < index; bufferPos++ { -+ cumulCount++ -+ if predicate(cumulCount) { -+ return s.buffer[bufferPos], nil -+ } -+ } -+ cumulCount += count -+ if predicate(cumulCount) { -+ return index, nil -+ } -+ } -+ } -+ -+ // Iterate over the rest of the buffer -+ for ; bufferPos < len(s.buffer); bufferPos++ { -+ cumulCount++ -+ if predicate(cumulCount) { -+ return s.buffer[bufferPos], nil -+ } -+ } -+ -+ return 0, errors.New(""the predicate on the cumulative count is never verified"") -+} -+ -+func (s *BufferedPaginatedStore) MergeWith(other Store) { -+ o, ok := other.(*BufferedPaginatedStore) -+ if ok && s.pageLenLog2 == o.pageLenLog2 { -+ // Merge pages. -+ for oPageOffset, oPage := range o.pages { -+ if len(oPage) == 0 { -+ continue -+ } -+ oPageIndex := o.minPageIndex + oPageOffset -+ page := s.page(oPageIndex, true) -+ for i, oCount := range oPage { -+ page[i] += oCount -+ } -+ } -+ -+ // Merge buffers. -+ for _, index := range o.buffer { -+ s.Add(index) -+ } -+ } else { -+ // Fallback merging. -+ other.ForEach(func(index int, count float64) (stop bool) { -+ s.AddWithCount(index, count) -+ return false -+ }) -+ } -+} -+ -+func (s *BufferedPaginatedStore) MergeWithProto(pb *sketchpb.Store) { -+ for index, count := range pb.BinCounts { -+ s.AddWithCount(int(index), count) -+ } -+ for indexOffset, count := range pb.ContiguousBinCounts { -+ s.AddWithCount(int(pb.ContiguousBinIndexOffset)+indexOffset, count) -+ } -+} -+ -+func (s *BufferedPaginatedStore) Bins() <-chan Bin { -+ s.sortBuffer() -+ ch := make(chan Bin) -+ go func() { -+ defer close(ch) -+ bufferPos := 0 -+ -+ // Iterate over the pages and the buffer simultaneously. 
-+ for pageOffset, page := range s.pages { -+ for lineIndex, count := range page { -+ if count == 0 { -+ continue -+ } -+ -+ index := s.index(s.minPageIndex+pageOffset, lineIndex) -+ -+ // Iterate over the buffer until index is reached. -+ var indexBufferStartPos int -+ for { -+ indexBufferStartPos = bufferPos -+ if indexBufferStartPos >= len(s.buffer) || s.buffer[indexBufferStartPos] > index { -+ break -+ } -+ bufferPos++ -+ for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] { -+ bufferPos++ -+ } -+ if s.buffer[indexBufferStartPos] == index { -+ break -+ } -+ ch <- Bin{index: s.buffer[indexBufferStartPos], count: float64(bufferPos - indexBufferStartPos)} -+ } -+ ch <- Bin{index: index, count: count + float64(bufferPos-indexBufferStartPos)} -+ } -+ } -+ -+ // Iterate over the rest of the buffer. -+ for bufferPos < len(s.buffer) { -+ indexBufferStartPos := bufferPos -+ bufferPos++ -+ for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] { -+ bufferPos++ -+ } -+ bin := Bin{index: s.buffer[indexBufferStartPos], count: float64(bufferPos - indexBufferStartPos)} -+ ch <- bin -+ } -+ }() -+ return ch -+} -+ -+func (s *BufferedPaginatedStore) ForEach(f func(index int, count float64) (stop bool)) { -+ s.sortBuffer() -+ bufferPos := 0 -+ -+ // Iterate over the pages and the buffer simultaneously. -+ for pageOffset, page := range s.pages { -+ for lineIndex, count := range page { -+ if count == 0 { -+ continue -+ } -+ -+ index := s.index(s.minPageIndex+pageOffset, lineIndex) -+ -+ // Iterate over the buffer until index is reached. -+ var indexBufferStartPos int -+ for { -+ indexBufferStartPos = bufferPos -+ if indexBufferStartPos >= len(s.buffer) || s.buffer[indexBufferStartPos] > index { -+ break -+ } -+ bufferPos++ -+ for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] { -+ bufferPos++ -+ } -+ if s.buffer[indexBufferStartPos] == index { -+ break -+ } -+ if f(s.buffer[indexBufferStartPos], float64(bufferPos-indexBufferStartPos)) { -+ return -+ } -+ } -+ if f(index, count+float64(bufferPos-indexBufferStartPos)) { -+ return -+ } -+ } -+ } -+ -+ // Iterate over the rest of the buffer. -+ for bufferPos < len(s.buffer) { -+ indexBufferStartPos := bufferPos -+ bufferPos++ -+ for bufferPos < len(s.buffer) && s.buffer[bufferPos] == s.buffer[indexBufferStartPos] { -+ bufferPos++ -+ } -+ if f(s.buffer[indexBufferStartPos], float64(bufferPos-indexBufferStartPos)) { -+ return -+ } -+ } -+} -+ -+func (s *BufferedPaginatedStore) Copy() Store { -+ bufferCopy := make([]int, len(s.buffer)) -+ copy(bufferCopy, s.buffer) -+ pagesCopy := make([][]float64, len(s.pages)) -+ for i, page := range s.pages { -+ if len(page) > 0 { -+ pageCopy := make([]float64, len(page)) -+ copy(pageCopy, page) -+ pagesCopy[i] = pageCopy -+ } -+ } -+ return &BufferedPaginatedStore{ -+ buffer: bufferCopy, -+ bufferCompactionTriggerLen: s.bufferCompactionTriggerLen, -+ pages: pagesCopy, -+ minPageIndex: s.minPageIndex, -+ pageLenLog2: s.pageLenLog2, -+ pageLenMask: s.pageLenMask, -+ } -+} -+ -+func (s *BufferedPaginatedStore) Clear() { -+ s.buffer = s.buffer[:0] -+ for i := range s.pages { -+ s.pages[i] = s.pages[i][:0] -+ } -+ s.minPageIndex = maxInt -+} -+ -+func (s *BufferedPaginatedStore) ToProto() *sketchpb.Store { -+ if s.IsEmpty() { -+ return &sketchpb.Store{} -+ } -+ // FIXME: add heuristic to use contiguousBinCounts when cheaper. 
-+ binCounts := make(map[int32]float64) -+ s.ForEach(func(index int, count float64) (stop bool) { -+ binCounts[int32(index)] = count -+ return false -+ }) -+ return &sketchpb.Store{ -+ BinCounts: binCounts, -+ } -+} -+ -+func (s *BufferedPaginatedStore) Reweight(w float64) error { -+ if w <= 0 { -+ return errors.New(""can't reweight by a negative factor"") -+ } -+ if w == 1 { -+ return nil -+ } -+ buffer := s.buffer -+ s.buffer = s.buffer[:0] -+ for _, p := range s.pages { -+ for i := range p { -+ p[i] *= w -+ } -+ } -+ for _, index := range buffer { -+ s.AddWithCount(index, w) -+ } -+ return nil -+} -+ -+func (s *BufferedPaginatedStore) Encode(b *[]byte, t enc.FlagType) { -+ s.compact() -+ if len(s.buffer) > 0 { -+ enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingIndexDeltas)) -+ enc.EncodeUvarint64(b, uint64(len(s.buffer))) -+ previousIndex := 0 -+ for _, index := range s.buffer { -+ enc.EncodeVarint64(b, int64(index-previousIndex)) -+ previousIndex = index -+ } -+ } -+ -+ for pageOffset, page := range s.pages { -+ if len(page) > 0 { -+ enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingContiguousCounts)) -+ enc.EncodeUvarint64(b, uint64(len(page))) -+ enc.EncodeVarint64(b, int64(s.index(s.minPageIndex+pageOffset, 0))) -+ enc.EncodeVarint64(b, 1) -+ for _, count := range page { -+ enc.EncodeVarfloat64(b, count) -+ } -+ } -+ } -+} -+ -+func (s *BufferedPaginatedStore) DecodeAndMergeWith(b *[]byte, encodingMode enc.SubFlag) error { -+ switch encodingMode { -+ -+ case enc.BinEncodingIndexDeltas: -+ numBins, err := enc.DecodeUvarint64(b) -+ if err != nil { -+ return err -+ } -+ remaining := int(numBins) -+ index := int64(0) -+ // Process indexes in batches to avoid checking after each insertion -+ // whether compaction should happen. -+ for { -+ batchSize := min(remaining, max(cap(s.buffer), s.bufferCompactionTriggerLen)-len(s.buffer)) -+ for i := 0; i < batchSize; i++ { -+ indexDelta, err := enc.DecodeVarint64(b) -+ if err != nil { -+ return err -+ } -+ index += indexDelta -+ s.buffer = append(s.buffer, int(index)) -+ } -+ remaining -= batchSize -+ if remaining == 0 { -+ return nil -+ } -+ s.compact() -+ } -+ -+ case enc.BinEncodingContiguousCounts: -+ numBins, err := enc.DecodeUvarint64(b) -+ if err != nil { -+ return err -+ } -+ indexOffset, err := enc.DecodeVarint64(b) -+ if err != nil { -+ return err -+ } -+ indexDelta, err := enc.DecodeVarint64(b) -+ if err != nil { -+ return err -+ } -+ pageLen := 1 << s.pageLenLog2 -+ for i := uint64(0); i < numBins; { -+ page := s.page(s.pageIndex(int(indexOffset)), true) -+ lineIndex := s.lineIndex(int(indexOffset)) -+ for lineIndex >= 0 && lineIndex < pageLen && i < numBins { -+ count, err := enc.DecodeVarfloat64(b) -+ if err != nil { -+ return err -+ } -+ page[lineIndex] += count -+ lineIndex += int(indexDelta) -+ indexOffset += indexDelta -+ i++ -+ } -+ } -+ return nil -+ -+ default: -+ return DecodeAndMergeWith(s, b, encodingMode) -+ } -+} -+ -+var _ Store = (*BufferedPaginatedStore)(nil) -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_highest_dense_store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_highest_dense_store.go -new file mode 100644 -index 0000000000000..2a431a176689e ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_highest_dense_store.go -@@ -0,0 +1,188 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. 
-+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package store -+ -+import ( -+ ""math"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+) -+ -+type CollapsingHighestDenseStore struct { -+ DenseStore -+ maxNumBins int -+ isCollapsed bool -+} -+ -+func NewCollapsingHighestDenseStore(maxNumBins int) *CollapsingHighestDenseStore { -+ return &CollapsingHighestDenseStore{ -+ DenseStore: DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32}, -+ maxNumBins: maxNumBins, -+ isCollapsed: false, -+ } -+} -+ -+func (s *CollapsingHighestDenseStore) Add(index int) { -+ s.AddWithCount(index, float64(1)) -+} -+ -+func (s *CollapsingHighestDenseStore) AddBin(bin Bin) { -+ index := bin.Index() -+ count := bin.Count() -+ if count == 0 { -+ return -+ } -+ s.AddWithCount(index, count) -+} -+ -+func (s *CollapsingHighestDenseStore) AddWithCount(index int, count float64) { -+ if count == 0 { -+ return -+ } -+ arrayIndex := s.normalize(index) -+ s.bins[arrayIndex] += count -+ s.count += count -+} -+ -+// Normalize the store, if necessary, so that the counter of the specified index can be updated. -+func (s *CollapsingHighestDenseStore) normalize(index int) int { -+ if index > s.maxIndex { -+ if s.isCollapsed { -+ return len(s.bins) - 1 -+ } else { -+ s.extendRange(index, index) -+ if s.isCollapsed { -+ return len(s.bins) - 1 -+ } -+ } -+ } else if index < s.minIndex { -+ s.extendRange(index, index) -+ } -+ return index - s.offset -+} -+ -+func (s *CollapsingHighestDenseStore) getNewLength(newMinIndex, newMaxIndex int) int { -+ return min(s.DenseStore.getNewLength(newMinIndex, newMaxIndex), s.maxNumBins) -+} -+ -+func (s *CollapsingHighestDenseStore) extendRange(newMinIndex, newMaxIndex int) { -+ newMinIndex = min(newMinIndex, s.minIndex) -+ newMaxIndex = max(newMaxIndex, s.maxIndex) -+ if s.IsEmpty() { -+ initialLength := s.getNewLength(newMinIndex, newMaxIndex) -+ s.bins = append(s.bins, make([]float64, initialLength)...) -+ s.offset = newMinIndex -+ s.minIndex = newMinIndex -+ s.maxIndex = newMaxIndex -+ s.adjust(newMinIndex, newMaxIndex) -+ } else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) { -+ s.minIndex = newMinIndex -+ s.maxIndex = newMaxIndex -+ } else { -+ // To avoid shifting too often when nearing the capacity of the array, -+ // we may grow it before we actually reach the capacity. -+ newLength := s.getNewLength(newMinIndex, newMaxIndex) -+ if newLength > len(s.bins) { -+ s.bins = append(s.bins, make([]float64, newLength-len(s.bins))...) -+ } -+ s.adjust(newMinIndex, newMaxIndex) -+ } -+} -+ -+// Adjust bins, offset, minIndex and maxIndex, without resizing the bins slice in order to make it fit the -+// specified range. -+func (s *CollapsingHighestDenseStore) adjust(newMinIndex, newMaxIndex int) { -+ if newMaxIndex-newMinIndex+1 > len(s.bins) { -+ // The range of indices is too wide, buckets of lowest indices need to be collapsed. -+ newMaxIndex = newMinIndex + len(s.bins) - 1 -+ if newMaxIndex <= s.minIndex { -+ // There will be only one non-empty bucket. -+ s.bins = make([]float64, len(s.bins)) -+ s.offset = newMinIndex -+ s.maxIndex = newMaxIndex -+ s.bins[len(s.bins)-1] = s.count -+ } else { -+ shift := s.offset - newMinIndex -+ if shift > 0 { -+ // Collapse the buckets. 
-+ n := float64(0) -+ for i := newMaxIndex + 1; i <= s.maxIndex; i++ { -+ n += s.bins[i-s.offset] -+ } -+ s.resetBins(newMaxIndex+1, s.maxIndex) -+ s.bins[newMaxIndex-s.offset] += n -+ s.maxIndex = newMaxIndex -+ // Shift the buckets to make room for newMinIndex. -+ s.shiftCounts(shift) -+ } else { -+ // Shift the buckets to make room for newMaxIndex. -+ s.shiftCounts(shift) -+ s.maxIndex = newMaxIndex -+ } -+ } -+ s.minIndex = newMinIndex -+ s.isCollapsed = true -+ } else { -+ s.centerCounts(newMinIndex, newMaxIndex) -+ } -+} -+ -+func (s *CollapsingHighestDenseStore) MergeWith(other Store) { -+ if other.IsEmpty() { -+ return -+ } -+ o, ok := other.(*CollapsingHighestDenseStore) -+ if !ok { -+ other.ForEach(func(index int, count float64) (stop bool) { -+ s.AddWithCount(index, count) -+ return false -+ }) -+ return -+ } -+ if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex { -+ s.extendRange(o.minIndex, o.maxIndex) -+ } -+ idx := o.maxIndex -+ for ; idx > s.maxIndex && idx >= o.minIndex; idx-- { -+ s.bins[len(s.bins)-1] += o.bins[idx-o.offset] -+ } -+ for ; idx > o.minIndex; idx-- { -+ s.bins[idx-s.offset] += o.bins[idx-o.offset] -+ } -+ // This is a separate test so that the comparison in the previous loop is strict (>) and handles -+ // o.minIndex = Integer.MIN_VALUE. -+ if idx == o.minIndex { -+ s.bins[idx-s.offset] += o.bins[idx-o.offset] -+ } -+ s.count += o.count -+} -+ -+func (s *CollapsingHighestDenseStore) Copy() Store { -+ bins := make([]float64, len(s.bins)) -+ copy(bins, s.bins) -+ return &CollapsingHighestDenseStore{ -+ DenseStore: DenseStore{ -+ bins: bins, -+ count: s.count, -+ offset: s.offset, -+ minIndex: s.minIndex, -+ maxIndex: s.maxIndex, -+ }, -+ maxNumBins: s.maxNumBins, -+ isCollapsed: s.isCollapsed, -+ } -+} -+ -+func (s *CollapsingHighestDenseStore) Clear() { -+ s.DenseStore.Clear() -+ s.isCollapsed = false -+} -+ -+func (s *CollapsingHighestDenseStore) DecodeAndMergeWith(r *[]byte, encodingMode enc.SubFlag) error { -+ return DecodeAndMergeWith(s, r, encodingMode) -+} -+ -+var _ Store = (*CollapsingHighestDenseStore)(nil) -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_lowest_dense_store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_lowest_dense_store.go -new file mode 100644 -index 0000000000000..80ae2a50767a0 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/collapsing_lowest_dense_store.go -@@ -0,0 +1,207 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package store -+ -+import ( -+ ""math"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+) -+ -+// CollapsingLowestDenseStore is a dynamically growing contiguous (non-sparse) store. -+// The lower bins get combined so that the total number of bins do not exceed maxNumBins. -+type CollapsingLowestDenseStore struct { -+ DenseStore -+ maxNumBins int -+ isCollapsed bool -+} -+ -+func NewCollapsingLowestDenseStore(maxNumBins int) *CollapsingLowestDenseStore { -+ // Bins are not allocated until values are added. -+ // When the first value is added, a small number of bins are allocated. The number of bins will -+ // grow as needed up to maxNumBins. 
-+ return &CollapsingLowestDenseStore{ -+ DenseStore: DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32}, -+ maxNumBins: maxNumBins, -+ isCollapsed: false, -+ } -+} -+ -+func (s *CollapsingLowestDenseStore) Add(index int) { -+ s.AddWithCount(index, float64(1)) -+} -+ -+func (s *CollapsingLowestDenseStore) AddBin(bin Bin) { -+ index := bin.Index() -+ count := bin.Count() -+ if count == 0 { -+ return -+ } -+ s.AddWithCount(index, count) -+} -+ -+func (s *CollapsingLowestDenseStore) AddWithCount(index int, count float64) { -+ if count == 0 { -+ return -+ } -+ arrayIndex := s.normalize(index) -+ s.bins[arrayIndex] += count -+ s.count += count -+} -+ -+// Normalize the store, if necessary, so that the counter of the specified index can be updated. -+func (s *CollapsingLowestDenseStore) normalize(index int) int { -+ if index < s.minIndex { -+ if s.isCollapsed { -+ return 0 -+ } else { -+ s.extendRange(index, index) -+ if s.isCollapsed { -+ return 0 -+ } -+ } -+ } else if index > s.maxIndex { -+ s.extendRange(index, index) -+ } -+ return index - s.offset -+} -+ -+func (s *CollapsingLowestDenseStore) getNewLength(newMinIndex, newMaxIndex int) int { -+ return min(s.DenseStore.getNewLength(newMinIndex, newMaxIndex), s.maxNumBins) -+} -+ -+func (s *CollapsingLowestDenseStore) extendRange(newMinIndex, newMaxIndex int) { -+ newMinIndex = min(newMinIndex, s.minIndex) -+ newMaxIndex = max(newMaxIndex, s.maxIndex) -+ if s.IsEmpty() { -+ initialLength := s.getNewLength(newMinIndex, newMaxIndex) -+ s.bins = append(s.bins, make([]float64, initialLength)...) -+ s.offset = newMinIndex -+ s.minIndex = newMinIndex -+ s.maxIndex = newMaxIndex -+ s.adjust(newMinIndex, newMaxIndex) -+ } else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) { -+ s.minIndex = newMinIndex -+ s.maxIndex = newMaxIndex -+ } else { -+ // To avoid shifting too often when nearing the capacity of the array, -+ // we may grow it before we actually reach the capacity. -+ newLength := s.getNewLength(newMinIndex, newMaxIndex) -+ if newLength > len(s.bins) { -+ s.bins = append(s.bins, make([]float64, newLength-len(s.bins))...) -+ } -+ s.adjust(newMinIndex, newMaxIndex) -+ } -+} -+ -+// Adjust bins, offset, minIndex and maxIndex, without resizing the bins slice in order to make it fit the -+// specified range. -+func (s *CollapsingLowestDenseStore) adjust(newMinIndex, newMaxIndex int) { -+ if newMaxIndex-newMinIndex+1 > len(s.bins) { -+ // The range of indices is too wide, buckets of lowest indices need to be collapsed. -+ newMinIndex = newMaxIndex - len(s.bins) + 1 -+ if newMinIndex >= s.maxIndex { -+ // There will be only one non-empty bucket. -+ s.bins = make([]float64, len(s.bins)) -+ s.offset = newMinIndex -+ s.minIndex = newMinIndex -+ s.bins[0] = s.count -+ } else { -+ shift := s.offset - newMinIndex -+ if shift < 0 { -+ // Collapse the buckets. -+ n := float64(0) -+ for i := s.minIndex; i < newMinIndex; i++ { -+ n += s.bins[i-s.offset] -+ } -+ s.resetBins(s.minIndex, newMinIndex-1) -+ s.bins[newMinIndex-s.offset] += n -+ s.minIndex = newMinIndex -+ // Shift the buckets to make room for newMaxIndex. -+ s.shiftCounts(shift) -+ } else { -+ // Shift the buckets to make room for newMinIndex. 
-+ s.shiftCounts(shift) -+ s.minIndex = newMinIndex -+ } -+ } -+ s.maxIndex = newMaxIndex -+ s.isCollapsed = true -+ } else { -+ s.centerCounts(newMinIndex, newMaxIndex) -+ } -+} -+ -+func (s *CollapsingLowestDenseStore) MergeWith(other Store) { -+ if other.IsEmpty() { -+ return -+ } -+ o, ok := other.(*CollapsingLowestDenseStore) -+ if !ok { -+ other.ForEach(func(index int, count float64) (stop bool) { -+ s.AddWithCount(index, count) -+ return false -+ }) -+ return -+ } -+ if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex { -+ s.extendRange(o.minIndex, o.maxIndex) -+ } -+ idx := o.minIndex -+ for ; idx < s.minIndex && idx <= o.maxIndex; idx++ { -+ s.bins[0] += o.bins[idx-o.offset] -+ } -+ for ; idx < o.maxIndex; idx++ { -+ s.bins[idx-s.offset] += o.bins[idx-o.offset] -+ } -+ // This is a separate test so that the comparison in the previous loop is strict (<) and handles -+ // store.maxIndex = Integer.MAX_VALUE. -+ if idx == o.maxIndex { -+ s.bins[idx-s.offset] += o.bins[idx-o.offset] -+ } -+ s.count += o.count -+} -+ -+func (s *CollapsingLowestDenseStore) Copy() Store { -+ bins := make([]float64, len(s.bins)) -+ copy(bins, s.bins) -+ return &CollapsingLowestDenseStore{ -+ DenseStore: DenseStore{ -+ bins: bins, -+ count: s.count, -+ offset: s.offset, -+ minIndex: s.minIndex, -+ maxIndex: s.maxIndex, -+ }, -+ maxNumBins: s.maxNumBins, -+ isCollapsed: s.isCollapsed, -+ } -+} -+ -+func (s *CollapsingLowestDenseStore) Clear() { -+ s.DenseStore.Clear() -+ s.isCollapsed = false -+} -+ -+func (s *CollapsingLowestDenseStore) DecodeAndMergeWith(r *[]byte, encodingMode enc.SubFlag) error { -+ return DecodeAndMergeWith(s, r, encodingMode) -+} -+ -+var _ Store = (*CollapsingLowestDenseStore)(nil) -+ -+func max(x, y int) int { -+ if x > y { -+ return x -+ } -+ return y -+} -+ -+func min(x, y int) int { -+ if x < y { -+ return x -+ } -+ return y -+} -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go -new file mode 100644 -index 0000000000000..2c4a3d4a08728 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/dense_store.go -@@ -0,0 +1,330 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package store -+ -+import ( -+ ""bytes"" -+ ""errors"" -+ ""fmt"" -+ ""math"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+) -+ -+const ( -+ arrayLengthOverhead = 64 -+ arrayLengthGrowthIncrement = 0.1 -+ -+ // Grow the bins with an extra growthBuffer bins to prevent growing too often -+ growthBuffer = 128 -+) -+ -+// DenseStore is a dynamically growing contiguous (non-sparse) store. The number of bins are -+// bound only by the size of the slice that can be allocated. 
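// A small sketch contrasting the collapsing store above with the plain
// DenseStore described in the comment above, assuming both behave as their
// comments state. The same ten indexes are added to each; the collapsing
// store is capped at 4 bins, so its lowest bins are folded together while
// the total count is preserved.
package main

import (
	""fmt""

	""github.com/DataDog/sketches-go/ddsketch/store""
)

func main() {
	dense := store.NewDenseStore()
	collapsing := store.NewCollapsingLowestDenseStore(4)

	for index := 0; index < 10; index++ {
		dense.Add(index)
		collapsing.Add(index)
	}

	denseMin, _ := dense.MinIndex()
	collapsingMin, _ := collapsing.MinIndex()

	fmt.Println(""dense:      min"", denseMin, ""total"", dense.TotalCount())           // min 0, total 10
	fmt.Println(""collapsing: min"", collapsingMin, ""total"", collapsing.TotalCount()) // min 6 (= 9 - 4 + 1), total 10
}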
-+type DenseStore struct { -+ bins []float64 -+ count float64 -+ offset int -+ minIndex int -+ maxIndex int -+} -+ -+func NewDenseStore() *DenseStore { -+ return &DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32} -+} -+ -+func (s *DenseStore) Add(index int) { -+ s.AddWithCount(index, float64(1)) -+} -+ -+func (s *DenseStore) AddBin(bin Bin) { -+ if bin.count == 0 { -+ return -+ } -+ s.AddWithCount(bin.index, bin.count) -+} -+ -+func (s *DenseStore) AddWithCount(index int, count float64) { -+ if count == 0 { -+ return -+ } -+ arrayIndex := s.normalize(index) -+ s.bins[arrayIndex] += count -+ s.count += count -+} -+ -+// Normalize the store, if necessary, so that the counter of the specified index can be updated. -+func (s *DenseStore) normalize(index int) int { -+ if index < s.minIndex || index > s.maxIndex { -+ s.extendRange(index, index) -+ } -+ return index - s.offset -+} -+ -+func (s *DenseStore) getNewLength(newMinIndex, newMaxIndex int) int { -+ desiredLength := newMaxIndex - newMinIndex + 1 -+ return int((float64(desiredLength+arrayLengthOverhead-1)/arrayLengthGrowthIncrement + 1) * arrayLengthGrowthIncrement) -+} -+ -+func (s *DenseStore) extendRange(newMinIndex, newMaxIndex int) { -+ -+ newMinIndex = min(newMinIndex, s.minIndex) -+ newMaxIndex = max(newMaxIndex, s.maxIndex) -+ -+ if s.IsEmpty() { -+ initialLength := s.getNewLength(newMinIndex, newMaxIndex) -+ s.bins = append(s.bins, make([]float64, initialLength)...) -+ s.offset = newMinIndex -+ s.minIndex = newMinIndex -+ s.maxIndex = newMaxIndex -+ s.adjust(newMinIndex, newMaxIndex) -+ } else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) { -+ s.minIndex = newMinIndex -+ s.maxIndex = newMaxIndex -+ } else { -+ // To avoid shifting too often when nearing the capacity of the array, -+ // we may grow it before we actually reach the capacity. -+ newLength := s.getNewLength(newMinIndex, newMaxIndex) -+ if newLength > len(s.bins) { -+ s.bins = append(s.bins, make([]float64, newLength-len(s.bins))...) -+ } -+ s.adjust(newMinIndex, newMaxIndex) -+ } -+} -+ -+// Adjust bins, offset, minIndex and maxIndex, without resizing the bins slice in order to make it fit the -+// specified range. 
-+func (s *DenseStore) adjust(newMinIndex, newMaxIndex int) { -+ s.centerCounts(newMinIndex, newMaxIndex) -+} -+ -+func (s *DenseStore) centerCounts(newMinIndex, newMaxIndex int) { -+ midIndex := newMinIndex + (newMaxIndex-newMinIndex+1)/2 -+ s.shiftCounts(s.offset + len(s.bins)/2 - midIndex) -+ s.minIndex = newMinIndex -+ s.maxIndex = newMaxIndex -+} -+ -+func (s *DenseStore) shiftCounts(shift int) { -+ minArrIndex := s.minIndex - s.offset -+ maxArrIndex := s.maxIndex - s.offset -+ copy(s.bins[minArrIndex+shift:], s.bins[minArrIndex:maxArrIndex+1]) -+ if shift > 0 { -+ s.resetBins(s.minIndex, s.minIndex+shift-1) -+ } else { -+ s.resetBins(s.maxIndex+shift+1, s.maxIndex) -+ } -+ s.offset -= shift -+} -+ -+func (s *DenseStore) resetBins(fromIndex, toIndex int) { -+ for i := fromIndex - s.offset; i <= toIndex-s.offset; i++ { -+ s.bins[i] = 0 -+ } -+} -+ -+func (s *DenseStore) IsEmpty() bool { -+ return s.count == 0 -+} -+ -+func (s *DenseStore) TotalCount() float64 { -+ return s.count -+} -+ -+func (s *DenseStore) MinIndex() (int, error) { -+ if s.IsEmpty() { -+ return 0, errUndefinedMinIndex -+ } -+ return s.minIndex, nil -+} -+ -+func (s *DenseStore) MaxIndex() (int, error) { -+ if s.IsEmpty() { -+ return 0, errUndefinedMaxIndex -+ } -+ return s.maxIndex, nil -+} -+ -+// Return the key for the value at rank -+func (s *DenseStore) KeyAtRank(rank float64) int { -+ if rank < 0 { -+ rank = 0 -+ } -+ var n float64 -+ for i, b := range s.bins { -+ n += b -+ if n > rank { -+ return i + s.offset -+ } -+ } -+ return s.maxIndex -+} -+ -+func (s *DenseStore) MergeWith(other Store) { -+ if other.IsEmpty() { -+ return -+ } -+ o, ok := other.(*DenseStore) -+ if !ok { -+ other.ForEach(func(index int, count float64) (stop bool) { -+ s.AddWithCount(index, count) -+ return false -+ }) -+ return -+ } -+ if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex { -+ s.extendRange(o.minIndex, o.maxIndex) -+ } -+ for idx := o.minIndex; idx <= o.maxIndex; idx++ { -+ s.bins[idx-s.offset] += o.bins[idx-o.offset] -+ } -+ s.count += o.count -+} -+ -+func (s *DenseStore) Bins() <-chan Bin { -+ ch := make(chan Bin) -+ go func() { -+ defer close(ch) -+ for idx := s.minIndex; idx <= s.maxIndex; idx++ { -+ if s.bins[idx-s.offset] > 0 { -+ ch <- Bin{index: idx, count: s.bins[idx-s.offset]} -+ } -+ } -+ }() -+ return ch -+} -+ -+func (s *DenseStore) ForEach(f func(index int, count float64) (stop bool)) { -+ for idx := s.minIndex; idx <= s.maxIndex; idx++ { -+ if s.bins[idx-s.offset] > 0 { -+ if f(idx, s.bins[idx-s.offset]) { -+ return -+ } -+ } -+ } -+} -+ -+func (s *DenseStore) Copy() Store { -+ bins := make([]float64, len(s.bins)) -+ copy(bins, s.bins) -+ return &DenseStore{ -+ bins: bins, -+ count: s.count, -+ offset: s.offset, -+ minIndex: s.minIndex, -+ maxIndex: s.maxIndex, -+ } -+} -+ -+func (s *DenseStore) Clear() { -+ s.bins = s.bins[:0] -+ s.count = 0 -+ s.minIndex = math.MaxInt32 -+ s.maxIndex = math.MinInt32 -+} -+ -+func (s *DenseStore) string() string { -+ var buffer bytes.Buffer -+ buffer.WriteString(""{"") -+ for i := 0; i < len(s.bins); i++ { -+ index := i + s.offset -+ buffer.WriteString(fmt.Sprintf(""%d: %f, "", index, s.bins[i])) -+ } -+ buffer.WriteString(fmt.Sprintf(""count: %v, offset: %d, minIndex: %d, maxIndex: %d}"", s.count, s.offset, s.minIndex, s.maxIndex)) -+ return buffer.String() -+} -+ -+func (s *DenseStore) ToProto() *sketchpb.Store { -+ if s.IsEmpty() { -+ return &sketchpb.Store{ContiguousBinCounts: nil} -+ } -+ bins := make([]float64, s.maxIndex-s.minIndex+1) -+ copy(bins, 
s.bins[s.minIndex-s.offset:s.maxIndex-s.offset+1]) -+ return &sketchpb.Store{ -+ ContiguousBinCounts: bins, -+ ContiguousBinIndexOffset: int32(s.minIndex), -+ } -+} -+ -+func (s *DenseStore) Reweight(w float64) error { -+ if w <= 0 { -+ return errors.New(""can't reweight by a negative factor"") -+ } -+ if w == 1 { -+ return nil -+ } -+ s.count *= w -+ for idx := s.minIndex; idx <= s.maxIndex; idx++ { -+ s.bins[idx-s.offset] *= w -+ } -+ return nil -+} -+ -+func (s *DenseStore) Encode(b *[]byte, t enc.FlagType) { -+ if s.IsEmpty() { -+ return -+ } -+ -+ denseEncodingSize := 0 -+ numBins := uint64(s.maxIndex-s.minIndex) + 1 -+ denseEncodingSize += enc.Uvarint64Size(numBins) -+ denseEncodingSize += enc.Varint64Size(int64(s.minIndex)) -+ denseEncodingSize += enc.Varint64Size(1) -+ -+ sparseEncodingSize := 0 -+ numNonEmptyBins := uint64(0) -+ -+ previousIndex := s.minIndex -+ for index := s.minIndex; index <= s.maxIndex; index++ { -+ count := s.bins[index-s.offset] -+ countVarFloat64Size := enc.Varfloat64Size(count) -+ denseEncodingSize += countVarFloat64Size -+ if count != 0 { -+ numNonEmptyBins++ -+ sparseEncodingSize += enc.Varint64Size(int64(index - previousIndex)) -+ sparseEncodingSize += countVarFloat64Size -+ previousIndex = index -+ } -+ } -+ sparseEncodingSize += enc.Uvarint64Size(numNonEmptyBins) -+ -+ if denseEncodingSize <= sparseEncodingSize { -+ s.encodeDensely(b, t, numBins) -+ } else { -+ s.encodeSparsely(b, t, numNonEmptyBins) -+ } -+} -+ -+func (s *DenseStore) encodeDensely(b *[]byte, t enc.FlagType, numBins uint64) { -+ enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingContiguousCounts)) -+ enc.EncodeUvarint64(b, numBins) -+ enc.EncodeVarint64(b, int64(s.minIndex)) -+ enc.EncodeVarint64(b, 1) -+ for index := s.minIndex; index <= s.maxIndex; index++ { -+ enc.EncodeVarfloat64(b, s.bins[index-s.offset]) -+ } -+} -+ -+func (s *DenseStore) encodeSparsely(b *[]byte, t enc.FlagType, numNonEmptyBins uint64) { -+ enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingIndexDeltasAndCounts)) -+ enc.EncodeUvarint64(b, numNonEmptyBins) -+ previousIndex := 0 -+ for index := s.minIndex; index <= s.maxIndex; index++ { -+ count := s.bins[index-s.offset] -+ if count != 0 { -+ enc.EncodeVarint64(b, int64(index-previousIndex)) -+ enc.EncodeVarfloat64(b, count) -+ previousIndex = index -+ } -+ } -+} -+ -+func (s *DenseStore) DecodeAndMergeWith(b *[]byte, encodingMode enc.SubFlag) error { -+ return DecodeAndMergeWith(s, b, encodingMode) -+} -+ -+var _ Store = (*DenseStore)(nil) -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go -new file mode 100644 -index 0000000000000..9a07836e91eaf ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/sparse.go -@@ -0,0 +1,184 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. 
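// A short sketch of the protobuf round trip, assuming the API added in this
// diff: DenseStore.ToProto above emits a sketchpb.Store with contiguous bin
// counts, and FromProto / MergeWithProto (defined in store.go further down)
// rebuild or merge a store from that message.
package main

import (
	""fmt""

	""github.com/DataDog/sketches-go/ddsketch/store""
)

func main() {
	original := store.NewDenseStore()
	original.AddWithCount(10, 2)
	original.AddWithCount(12, 3)

	// Serialize to the protobuf representation, then rebuild a store from it.
	pb := original.ToProto()
	restored := store.FromProto(pb)
	fmt.Println(""original total:"", original.TotalCount()) // 5
	fmt.Println(""restored total:"", restored.TotalCount()) // 5

	// MergeWithProto merges the same message into an existing store.
	store.MergeWithProto(restored, pb)
	fmt.Println(""after merge:"", restored.TotalCount()) // 10
}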
-+ -+package store -+ -+import ( -+ ""errors"" -+ ""sort"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+) -+ -+type SparseStore struct { -+ counts map[int]float64 -+} -+ -+func NewSparseStore() *SparseStore { -+ return &SparseStore{counts: make(map[int]float64)} -+} -+ -+func (s *SparseStore) Add(index int) { -+ s.counts[index]++ -+} -+ -+func (s *SparseStore) AddBin(bin Bin) { -+ s.AddWithCount(bin.index, bin.count) -+} -+ -+func (s *SparseStore) AddWithCount(index int, count float64) { -+ if count == 0 { -+ return -+ } -+ s.counts[index] += count -+} -+ -+func (s *SparseStore) Bins() <-chan Bin { -+ orderedBins := s.orderedBins() -+ ch := make(chan Bin) -+ go func() { -+ defer close(ch) -+ for _, bin := range orderedBins { -+ ch <- bin -+ } -+ }() -+ return ch -+} -+ -+func (s *SparseStore) orderedBins() []Bin { -+ bins := make([]Bin, 0, len(s.counts)) -+ for index, count := range s.counts { -+ bins = append(bins, Bin{index: index, count: count}) -+ } -+ sort.Slice(bins, func(i, j int) bool { return bins[i].index < bins[j].index }) -+ return bins -+} -+ -+func (s *SparseStore) ForEach(f func(index int, count float64) (stop bool)) { -+ for index, count := range s.counts { -+ if f(index, count) { -+ return -+ } -+ } -+} -+ -+func (s *SparseStore) Copy() Store { -+ countsCopy := make(map[int]float64) -+ for index, count := range s.counts { -+ countsCopy[index] = count -+ } -+ return &SparseStore{counts: countsCopy} -+} -+ -+func (s *SparseStore) Clear() { -+ for index := range s.counts { -+ delete(s.counts, index) -+ } -+} -+ -+func (s *SparseStore) IsEmpty() bool { -+ return len(s.counts) == 0 -+} -+ -+func (s *SparseStore) MaxIndex() (int, error) { -+ if s.IsEmpty() { -+ return 0, errUndefinedMaxIndex -+ } -+ maxIndex := minInt -+ for index := range s.counts { -+ if index > maxIndex { -+ maxIndex = index -+ } -+ } -+ return maxIndex, nil -+} -+ -+func (s *SparseStore) MinIndex() (int, error) { -+ if s.IsEmpty() { -+ return 0, errUndefinedMinIndex -+ } -+ minIndex := maxInt -+ for index := range s.counts { -+ if index < minIndex { -+ minIndex = index -+ } -+ } -+ return minIndex, nil -+} -+ -+func (s *SparseStore) TotalCount() float64 { -+ totalCount := float64(0) -+ for _, count := range s.counts { -+ totalCount += count -+ } -+ return totalCount -+} -+ -+func (s *SparseStore) KeyAtRank(rank float64) int { -+ orderedBins := s.orderedBins() -+ cumulCount := float64(0) -+ for _, bin := range orderedBins { -+ cumulCount += bin.count -+ if cumulCount > rank { -+ return bin.index -+ } -+ } -+ maxIndex, err := s.MaxIndex() -+ if err == nil { -+ return maxIndex -+ } else { -+ // FIXME: make Store's KeyAtRank consistent with MinIndex and MaxIndex -+ return 0 -+ } -+} -+ -+func (s *SparseStore) MergeWith(store Store) { -+ store.ForEach(func(index int, count float64) (stop bool) { -+ s.AddWithCount(index, count) -+ return false -+ }) -+} -+ -+func (s *SparseStore) ToProto() *sketchpb.Store { -+ binCounts := make(map[int32]float64) -+ for index, count := range s.counts { -+ binCounts[int32(index)] = count -+ } -+ return &sketchpb.Store{BinCounts: binCounts} -+} -+ -+func (s *SparseStore) Reweight(w float64) error { -+ if w <= 0 { -+ return errors.New(""can't reweight by a negative factor"") -+ } -+ if w == 1 { -+ return nil -+ } -+ for index := range s.counts { -+ s.counts[index] *= w -+ } -+ return nil -+} -+ -+func (s *SparseStore) Encode(b *[]byte, t enc.FlagType) { -+ if s.IsEmpty() { -+ return -+ } -+ 
enc.EncodeFlag(b, enc.NewFlag(t, enc.BinEncodingIndexDeltasAndCounts)) -+ enc.EncodeUvarint64(b, uint64(len(s.counts))) -+ previousIndex := 0 -+ for index, count := range s.counts { -+ enc.EncodeVarint64(b, int64(index-previousIndex)) -+ enc.EncodeVarfloat64(b, count) -+ previousIndex = index -+ } -+} -+ -+func (s *SparseStore) DecodeAndMergeWith(b *[]byte, encodingMode enc.SubFlag) error { -+ return DecodeAndMergeWith(s, b, encodingMode) -+} -+ -+var _ Store = (*SparseStore)(nil) -diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go b/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go -new file mode 100644 -index 0000000000000..64a5e3d508f79 ---- /dev/null -+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/store/store.go -@@ -0,0 +1,153 @@ -+// Unless explicitly stated otherwise all files in this repository are licensed -+// under the Apache License 2.0. -+// This product includes software developed at Datadog (https://www.datadoghq.com/). -+// Copyright 2021 Datadog, Inc. -+ -+package store -+ -+import ( -+ ""errors"" -+ -+ enc ""github.com/DataDog/sketches-go/ddsketch/encoding"" -+ ""github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"" -+) -+ -+type Provider func() Store -+ -+var ( -+ DefaultProvider = Provider(BufferedPaginatedStoreConstructor) -+ DenseStoreConstructor = Provider(func() Store { return NewDenseStore() }) -+ BufferedPaginatedStoreConstructor = Provider(func() Store { return NewBufferedPaginatedStore() }) -+ SparseStoreConstructor = Provider(func() Store { return NewSparseStore() }) -+) -+ -+const ( -+ maxInt = int(^uint(0) >> 1) -+ minInt = ^maxInt -+) -+ -+var ( -+ errUndefinedMinIndex = errors.New(""MinIndex of empty store is undefined"") -+ errUndefinedMaxIndex = errors.New(""MaxIndex of empty store is undefined"") -+) -+ -+type Store interface { -+ Add(index int) -+ AddBin(bin Bin) -+ AddWithCount(index int, count float64) -+ // Bins returns a channel that emits the bins that are encoded in the store. -+ // Note that this leaks a channel and a goroutine if it is not iterated to completion. -+ Bins() <-chan Bin -+ // ForEach applies f to all elements of the store or until f returns true. -+ ForEach(f func(index int, count float64) (stop bool)) -+ Copy() Store -+ // Clear empties the store while allowing reusing already allocated memory. -+ // In some situations, it may be advantageous to clear and reuse a store -+ // rather than instantiating a new one. Keeping reusing the same store again -+ // and again on varying input data distributions may however ultimately make -+ // the store overly large and may waste memory space. -+ Clear() -+ IsEmpty() bool -+ MaxIndex() (int, error) -+ MinIndex() (int, error) -+ TotalCount() float64 -+ KeyAtRank(rank float64) int -+ MergeWith(store Store) -+ ToProto() *sketchpb.Store -+ // Reweight multiplies all values from the store by w, but keeps the same global distribution. -+ Reweight(w float64) error -+ // Encode encodes the bins of the store and appends its content to the -+ // provided []byte. -+ // The provided FlagType indicates whether the store encodes positive or -+ // negative values. -+ Encode(b *[]byte, t enc.FlagType) -+ // DecodeAndMergeWith decodes bins that have been encoded in the format of -+ // the provided binEncodingMode and merges them within the receiver store. -+ // It updates the provided []byte so that it starts immediately after the -+ // encoded bins. 
-+ DecodeAndMergeWith(b *[]byte, binEncodingMode enc.SubFlag) error -+} -+ -+// FromProto returns an instance of DenseStore that contains the data in the provided protobuf representation. -+func FromProto(pb *sketchpb.Store) *DenseStore { -+ store := NewDenseStore() -+ MergeWithProto(store, pb) -+ return store -+} -+ -+// MergeWithProto merges the distribution in a protobuf Store to an existing store. -+// - if called with an empty store, this simply populates the store with the distribution in the protobuf Store. -+// - if called with a non-empty store, this has the same outcome as deserializing the protobuf Store, then merging. -+func MergeWithProto(store Store, pb *sketchpb.Store) { -+ for idx, count := range pb.BinCounts { -+ store.AddWithCount(int(idx), count) -+ } -+ for idx, count := range pb.ContiguousBinCounts { -+ store.AddWithCount(idx+int(pb.ContiguousBinIndexOffset), count) -+ } -+} -+ -+func DecodeAndMergeWith(s Store, b *[]byte, binEncodingMode enc.SubFlag) error { -+ switch binEncodingMode { -+ -+ case enc.BinEncodingIndexDeltasAndCounts: -+ numBins, err := enc.DecodeUvarint64(b) -+ if err != nil { -+ return err -+ } -+ index := int64(0) -+ for i := uint64(0); i < numBins; i++ { -+ indexDelta, err := enc.DecodeVarint64(b) -+ if err != nil { -+ return err -+ } -+ count, err := enc.DecodeVarfloat64(b) -+ if err != nil { -+ return err -+ } -+ index += indexDelta -+ s.AddWithCount(int(index), count) -+ } -+ -+ case enc.BinEncodingIndexDeltas: -+ numBins, err := enc.DecodeUvarint64(b) -+ if err != nil { -+ return err -+ } -+ index := int64(0) -+ for i := uint64(0); i < numBins; i++ { -+ indexDelta, err := enc.DecodeVarint64(b) -+ if err != nil { -+ return err -+ } -+ index += indexDelta -+ s.Add(int(index)) -+ } -+ -+ case enc.BinEncodingContiguousCounts: -+ numBins, err := enc.DecodeUvarint64(b) -+ if err != nil { -+ return err -+ } -+ index, err := enc.DecodeVarint64(b) -+ if err != nil { -+ return err -+ } -+ indexDelta, err := enc.DecodeVarint64(b) -+ if err != nil { -+ return err -+ } -+ for i := uint64(0); i < numBins; i++ { -+ count, err := enc.DecodeVarfloat64(b) -+ if err != nil { -+ return err -+ } -+ s.AddWithCount(int(index), count) -+ index += indexDelta -+ } -+ -+ default: -+ return errors.New(""unknown bin encoding"") -+ } -+ return nil -+} -diff --git a/vendor/github.com/influxdata/tdigest/.gitignore b/vendor/github.com/influxdata/tdigest/.gitignore -new file mode 100644 -index 0000000000000..098cc7e379f94 ---- /dev/null -+++ b/vendor/github.com/influxdata/tdigest/.gitignore -@@ -0,0 +1 @@ -+/test/*.dat* -diff --git a/vendor/github.com/influxdata/tdigest/LICENSE b/vendor/github.com/influxdata/tdigest/LICENSE -new file mode 100644 -index 0000000000000..ebb2bfb1aa827 ---- /dev/null -+++ b/vendor/github.com/influxdata/tdigest/LICENSE -@@ -0,0 +1,202 @@ -+ Apache License -+ Version 2.0, January 2004 -+ http://www.apache.org/licenses/ -+ -+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -+ -+ 1. Definitions. -+ -+ ""License"" shall mean the terms and conditions for use, reproduction, -+ and distribution as defined by Sections 1 through 9 of this document. -+ -+ ""Licensor"" shall mean the copyright owner or entity authorized by -+ the copyright owner that is granting the License. -+ -+ ""Legal Entity"" shall mean the union of the acting entity and all -+ other entities that control, are controlled by, or are under common -+ control with that entity. 
For the purposes of this definition, -+ ""control"" means (i) the power, direct or indirect, to cause the -+ direction or management of such entity, whether by contract or -+ otherwise, or (ii) ownership of fifty percent (50%) or more of the -+ outstanding shares, or (iii) beneficial ownership of such entity. -+ -+ ""You"" (or ""Your"") shall mean an individual or Legal Entity -+ exercising permissions granted by this License. -+ -+ ""Source"" form shall mean the preferred form for making modifications, -+ including but not limited to software source code, documentation -+ source, and configuration files. -+ -+ ""Object"" form shall mean any form resulting from mechanical -+ transformation or translation of a Source form, including but -+ not limited to compiled object code, generated documentation, -+ and conversions to other media types. -+ -+ ""Work"" shall mean the work of authorship, whether in Source or -+ Object form, made available under the License, as indicated by a -+ copyright notice that is included in or attached to the work -+ (an example is provided in the Appendix below). -+ -+ ""Derivative Works"" shall mean any work, whether in Source or Object -+ form, that is based on (or derived from) the Work and for which the -+ editorial revisions, annotations, elaborations, or other modifications -+ represent, as a whole, an original work of authorship. For the purposes -+ of this License, Derivative Works shall not include works that remain -+ separable from, or merely link (or bind by name) to the interfaces of, -+ the Work and Derivative Works thereof. -+ -+ ""Contribution"" shall mean any work of authorship, including -+ the original version of the Work and any modifications or additions -+ to that Work or Derivative Works thereof, that is intentionally -+ submitted to Licensor for inclusion in the Work by the copyright owner -+ or by an individual or Legal Entity authorized to submit on behalf of -+ the copyright owner. For the purposes of this definition, ""submitted"" -+ means any form of electronic, verbal, or written communication sent -+ to the Licensor or its representatives, including but not limited to -+ communication on electronic mailing lists, source code control systems, -+ and issue tracking systems that are managed by, or on behalf of, the -+ Licensor for the purpose of discussing and improving the Work, but -+ excluding communication that is conspicuously marked or otherwise -+ designated in writing by the copyright owner as ""Not a Contribution."" -+ -+ ""Contributor"" shall mean Licensor and any individual or Legal Entity -+ on behalf of whom a Contribution has been received by Licensor and -+ subsequently incorporated within the Work. -+ -+ 2. Grant of Copyright License. Subject to the terms and conditions of -+ this License, each Contributor hereby grants to You a perpetual, -+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable -+ copyright license to reproduce, prepare Derivative Works of, -+ publicly display, publicly perform, sublicense, and distribute the -+ Work and such Derivative Works in Source or Object form. -+ -+ 3. Grant of Patent License. 
Subject to the terms and conditions of -+ this License, each Contributor hereby grants to You a perpetual, -+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable -+ (except as stated in this section) patent license to make, have made, -+ use, offer to sell, sell, import, and otherwise transfer the Work, -+ where such license applies only to those patent claims licensable -+ by such Contributor that are necessarily infringed by their -+ Contribution(s) alone or by combination of their Contribution(s) -+ with the Work to which such Contribution(s) was submitted. If You -+ institute patent litigation against any entity (including a -+ cross-claim or counterclaim in a lawsuit) alleging that the Work -+ or a Contribution incorporated within the Work constitutes direct -+ or contributory patent infringement, then any patent licenses -+ granted to You under this License for that Work shall terminate -+ as of the date such litigation is filed. -+ -+ 4. Redistribution. You may reproduce and distribute copies of the -+ Work or Derivative Works thereof in any medium, with or without -+ modifications, and in Source or Object form, provided that You -+ meet the following conditions: -+ -+ (a) You must give any other recipients of the Work or -+ Derivative Works a copy of this License; and -+ -+ (b) You must cause any modified files to carry prominent notices -+ stating that You changed the files; and -+ -+ (c) You must retain, in the Source form of any Derivative Works -+ that You distribute, all copyright, patent, trademark, and -+ attribution notices from the Source form of the Work, -+ excluding those notices that do not pertain to any part of -+ the Derivative Works; and -+ -+ (d) If the Work includes a ""NOTICE"" text file as part of its -+ distribution, then any Derivative Works that You distribute must -+ include a readable copy of the attribution notices contained -+ within such NOTICE file, excluding those notices that do not -+ pertain to any part of the Derivative Works, in at least one -+ of the following places: within a NOTICE text file distributed -+ as part of the Derivative Works; within the Source form or -+ documentation, if provided along with the Derivative Works; or, -+ within a display generated by the Derivative Works, if and -+ wherever such third-party notices normally appear. The contents -+ of the NOTICE file are for informational purposes only and -+ do not modify the License. You may add Your own attribution -+ notices within Derivative Works that You distribute, alongside -+ or as an addendum to the NOTICE text from the Work, provided -+ that such additional attribution notices cannot be construed -+ as modifying the License. -+ -+ You may add Your own copyright statement to Your modifications and -+ may provide additional or different license terms and conditions -+ for use, reproduction, or distribution of Your modifications, or -+ for any such Derivative Works as a whole, provided Your use, -+ reproduction, and distribution of the Work otherwise complies with -+ the conditions stated in this License. -+ -+ 5. Submission of Contributions. Unless You explicitly state otherwise, -+ any Contribution intentionally submitted for inclusion in the Work -+ by You to the Licensor shall be under the terms and conditions of -+ this License, without any additional terms or conditions. -+ Notwithstanding the above, nothing herein shall supersede or modify -+ the terms of any separate license agreement you may have executed -+ with Licensor regarding such Contributions. 
-+ -+ 6. Trademarks. This License does not grant permission to use the trade -+ names, trademarks, service marks, or product names of the Licensor, -+ except as required for reasonable and customary use in describing the -+ origin of the Work and reproducing the content of the NOTICE file. -+ -+ 7. Disclaimer of Warranty. Unless required by applicable law or -+ agreed to in writing, Licensor provides the Work (and each -+ Contributor provides its Contributions) on an ""AS IS"" BASIS, -+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+ implied, including, without limitation, any warranties or conditions -+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -+ PARTICULAR PURPOSE. You are solely responsible for determining the -+ appropriateness of using or redistributing the Work and assume any -+ risks associated with Your exercise of permissions under this License. -+ -+ 8. Limitation of Liability. In no event and under no legal theory, -+ whether in tort (including negligence), contract, or otherwise, -+ unless required by applicable law (such as deliberate and grossly -+ negligent acts) or agreed to in writing, shall any Contributor be -+ liable to You for damages, including any direct, indirect, special, -+ incidental, or consequential damages of any character arising as a -+ result of this License or out of the use or inability to use the -+ Work (including but not limited to damages for loss of goodwill, -+ work stoppage, computer failure or malfunction, or any and all -+ other commercial damages or losses), even if such Contributor -+ has been advised of the possibility of such damages. -+ -+ 9. Accepting Warranty or Additional Liability. While redistributing -+ the Work or Derivative Works thereof, You may choose to offer, -+ and charge a fee for, acceptance of support, warranty, indemnity, -+ or other liability obligations and/or rights consistent with this -+ License. However, in accepting such obligations, You may act only -+ on Your own behalf and on Your sole responsibility, not on behalf -+ of any other Contributor, and only if You agree to indemnify, -+ defend, and hold each Contributor harmless for any liability -+ incurred by, or claims asserted against, such Contributor by reason -+ of your accepting any such warranty or additional liability. -+ -+ END OF TERMS AND CONDITIONS -+ -+ APPENDIX: How to apply the Apache License to your work. -+ -+ To apply the Apache License to your work, attach the following -+ boilerplate notice, with the fields enclosed by brackets ""{}"" -+ replaced with your own identifying information. (Don't include -+ the brackets!) The text should be enclosed in the appropriate -+ comment syntax for the file format. We also recommend that a -+ file or class name and description of purpose be included on the -+ same ""printed page"" as the copyright notice for easier -+ identification within third-party archives. -+ -+ Copyright 2018 InfluxData Inc. -+ -+ Licensed under the Apache License, Version 2.0 (the ""License""); -+ you may not use this file except in compliance with the License. -+ You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+ Unless required by applicable law or agreed to in writing, software -+ distributed under the License is distributed on an ""AS IS"" BASIS, -+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+ See the License for the specific language governing permissions and -+ limitations under the License. 
-+ -diff --git a/vendor/github.com/influxdata/tdigest/README.md b/vendor/github.com/influxdata/tdigest/README.md -new file mode 100644 -index 0000000000000..2849249985442 ---- /dev/null -+++ b/vendor/github.com/influxdata/tdigest/README.md -@@ -0,0 +1,37 @@ -+# tdigest -+ -+This is an implementation of Ted Dunning's [t-digest](https://github.com/tdunning/t-digest/) in Go. -+ -+The implementation is based off [Derrick Burns' C++ implementation](https://github.com/derrickburns/tdigest). -+ -+## Example -+ -+```go -+package main -+ -+import ( -+ ""log"" -+ -+ ""github.com/influxdata/tdigest"" -+) -+ -+func main() { -+ td := tdigest.NewWithCompression(1000) -+ for _, x := range []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1} { -+ td.Add(x, 1) -+ } -+ -+ // Compute Quantiles -+ log.Println(""50th"", td.Quantile(0.5)) -+ log.Println(""75th"", td.Quantile(0.75)) -+ log.Println(""90th"", td.Quantile(0.9)) -+ log.Println(""99th"", td.Quantile(0.99)) -+ -+ // Compute CDFs -+ log.Println(""CDF(1) = "", td.CDF(1)) -+ log.Println(""CDF(2) = "", td.CDF(2)) -+ log.Println(""CDF(3) = "", td.CDF(3)) -+ log.Println(""CDF(4) = "", td.CDF(4)) -+ log.Println(""CDF(5) = "", td.CDF(5)) -+} -+``` -diff --git a/vendor/github.com/influxdata/tdigest/centroid.go b/vendor/github.com/influxdata/tdigest/centroid.go -new file mode 100644 -index 0000000000000..88db1b46b09f7 ---- /dev/null -+++ b/vendor/github.com/influxdata/tdigest/centroid.go -@@ -0,0 +1,60 @@ -+package tdigest -+ -+import ( -+ ""fmt"" -+ ""sort"" -+) -+ -+// ErrWeightLessThanZero is used when the weight is not able to be processed. -+const ErrWeightLessThanZero = Error(""centroid weight cannot be less than zero"") -+ -+// Error is a domain error encountered while processing tdigests -+type Error string -+ -+func (e Error) Error() string { -+ return string(e) -+} -+ -+// Centroid average position of all points in a shape -+type Centroid struct { -+ Mean float64 -+ Weight float64 -+} -+ -+func (c *Centroid) String() string { -+ return fmt.Sprintf(""{mean: %f weight: %f}"", c.Mean, c.Weight) -+} -+ -+// Add averages the two centroids together and update this centroid -+func (c *Centroid) Add(r Centroid) error { -+ if r.Weight < 0 { -+ return ErrWeightLessThanZero -+ } -+ if c.Weight != 0 { -+ c.Weight += r.Weight -+ c.Mean += r.Weight * (r.Mean - c.Mean) / c.Weight -+ } else { -+ c.Weight = r.Weight -+ c.Mean = r.Mean -+ } -+ return nil -+} -+ -+// CentroidList is sorted by the Mean of the centroid, ascending. -+type CentroidList []Centroid -+ -+// Clear clears the list. -+func (l *CentroidList) Clear() { -+ *l = (*l)[:0] -+} -+ -+func (l CentroidList) Len() int { return len(l) } -+func (l CentroidList) Less(i, j int) bool { return l[i].Mean < l[j].Mean } -+func (l CentroidList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -+ -+// NewCentroidList creates a priority queue for the centroids -+func NewCentroidList(centroids []Centroid) CentroidList { -+ l := CentroidList(centroids) -+ sort.Sort(l) -+ return l -+} -diff --git a/vendor/github.com/influxdata/tdigest/tdigest.go b/vendor/github.com/influxdata/tdigest/tdigest.go -new file mode 100644 -index 0000000000000..adab1c8b2cc77 ---- /dev/null -+++ b/vendor/github.com/influxdata/tdigest/tdigest.go -@@ -0,0 +1,309 @@ -+package tdigest -+ -+import ( -+ ""math"" -+ ""sort"" -+) -+ -+// TDigest is a data structure for accurate on-line accumulation of -+// rank-based statistics such as quantiles and trimmed means. 
-+type TDigest struct { -+ Compression float64 -+ -+ maxProcessed int -+ maxUnprocessed int -+ processed CentroidList -+ unprocessed CentroidList -+ cumulative []float64 -+ processedWeight float64 -+ unprocessedWeight float64 -+ min float64 -+ max float64 -+} -+ -+// New initializes a new distribution with a default compression. -+func New() *TDigest { -+ return NewWithCompression(1000) -+} -+ -+// NewWithCompression initializes a new distribution with custom compression. -+func NewWithCompression(c float64) *TDigest { -+ t := &TDigest{ -+ Compression: c, -+ } -+ t.maxProcessed = processedSize(0, t.Compression) -+ t.maxUnprocessed = unprocessedSize(0, t.Compression) -+ t.processed = make(CentroidList, 0, t.maxProcessed) -+ t.unprocessed = make(CentroidList, 0, t.maxUnprocessed+1) -+ t.Reset() -+ return t -+} -+ -+// Calculate number of bytes needed for a tdigest of size c, -+// where c is the compression value -+func ByteSizeForCompression(comp float64) int { -+ c := int(comp) -+ // // A centroid is 2 float64s, so we need 16 bytes for each centroid -+ // float_size := 8 -+ // centroid_size := 2 * float_size -+ -+ // // Unprocessed and processed can grow up to length c -+ // unprocessed_size := centroid_size * c -+ // processed_size := unprocessed_size -+ -+ // // the cumulative field can also be of length c, but each item is a single float64 -+ // cumulative_size := float_size * c // <- this could also be unprocessed_size / 2 -+ -+ // return unprocessed_size + processed_size + cumulative_size -+ -+ // // or, more succinctly: -+ // return float_size * c * 5 -+ -+ // or even more succinctly -+ return c * 40 -+} -+ -+// Reset resets the distribution to its initial state. -+func (t *TDigest) Reset() { -+ t.processed = t.processed[:0] -+ t.unprocessed = t.unprocessed[:0] -+ t.cumulative = t.cumulative[:0] -+ t.processedWeight = 0 -+ t.unprocessedWeight = 0 -+ t.min = math.MaxFloat64 -+ t.max = -math.MaxFloat64 -+} -+ -+// Add adds a value x with a weight w to the distribution. -+func (t *TDigest) Add(x, w float64) { -+ t.AddCentroid(Centroid{Mean: x, Weight: w}) -+} -+ -+// AddCentroidList can quickly add multiple centroids. -+func (t *TDigest) AddCentroidList(c CentroidList) { -+ // It's possible to optimize this by bulk-copying the slice, but this -+ // yields just a 1-2% speedup (most time is in process()), so not worth -+ // the complexity. -+ for i := range c { -+ t.AddCentroid(c[i]) -+ } -+} -+ -+// AddCentroid adds a single centroid. -+// Weights which are not a number or are <= 0 are ignored, as are NaN means. -+func (t *TDigest) AddCentroid(c Centroid) { -+ if math.IsNaN(c.Mean) || c.Weight <= 0 || math.IsNaN(c.Weight) || math.IsInf(c.Weight, 1) { -+ return -+ } -+ -+ t.unprocessed = append(t.unprocessed, c) -+ t.unprocessedWeight += c.Weight -+ -+ if t.processed.Len() > t.maxProcessed || -+ t.unprocessed.Len() > t.maxUnprocessed { -+ t.process() -+ } -+} -+ -+// Merges the supplied digest into this digest. Functionally equivalent to -+// calling t.AddCentroidList(t2.Centroids(nil)), but avoids making an extra -+// copy of the CentroidList. -+func (t *TDigest) Merge(t2 *TDigest) { -+ t2.process() -+ t.AddCentroidList(t2.processed) -+} -+ -+func (t *TDigest) process() { -+ if t.unprocessed.Len() > 0 || -+ t.processed.Len() > t.maxProcessed { -+ -+ // Append all processed centroids to the unprocessed list and sort -+ t.unprocessed = append(t.unprocessed, t.processed...) 
-+ sort.Sort(&t.unprocessed) -+ -+ // Reset processed list with first centroid -+ t.processed.Clear() -+ t.processed = append(t.processed, t.unprocessed[0]) -+ -+ t.processedWeight += t.unprocessedWeight -+ t.unprocessedWeight = 0 -+ soFar := t.unprocessed[0].Weight -+ limit := t.processedWeight * t.integratedQ(1.0) -+ for _, centroid := range t.unprocessed[1:] { -+ projected := soFar + centroid.Weight -+ if projected <= limit { -+ soFar = projected -+ (&t.processed[t.processed.Len()-1]).Add(centroid) -+ } else { -+ k1 := t.integratedLocation(soFar / t.processedWeight) -+ limit = t.processedWeight * t.integratedQ(k1+1.0) -+ soFar += centroid.Weight -+ t.processed = append(t.processed, centroid) -+ } -+ } -+ t.min = math.Min(t.min, t.processed[0].Mean) -+ t.max = math.Max(t.max, t.processed[t.processed.Len()-1].Mean) -+ t.unprocessed.Clear() -+ } -+} -+ -+// Centroids returns a copy of processed centroids. -+// Useful when aggregating multiple t-digests. -+// -+// Centroids are appended to the passed CentroidList; if you're re-using a -+// buffer, be sure to pass cl[:0]. -+func (t *TDigest) Centroids(cl CentroidList) CentroidList { -+ t.process() -+ return append(cl, t.processed...) -+} -+ -+func (t *TDigest) Count() float64 { -+ t.process() -+ -+ // t.process always updates t.processedWeight to the total count of all -+ // centroids, so we don't need to re-count here. -+ return t.processedWeight -+} -+ -+func (t *TDigest) updateCumulative() { -+ // Weight can only increase, so the final cumulative value will always be -+ // either equal to, or less than, the total weight. If they are the same, -+ // then nothing has changed since the last update. -+ if len(t.cumulative) > 0 && t.cumulative[len(t.cumulative)-1] == t.processedWeight { -+ return -+ } -+ -+ if n := t.processed.Len() + 1; n <= cap(t.cumulative) { -+ t.cumulative = t.cumulative[:n] -+ } else { -+ t.cumulative = make([]float64, n) -+ } -+ -+ prev := 0.0 -+ for i, centroid := range t.processed { -+ cur := centroid.Weight -+ t.cumulative[i] = prev + cur/2.0 -+ prev = prev + cur -+ } -+ t.cumulative[t.processed.Len()] = prev -+} -+ -+// Quantile returns the (approximate) quantile of -+// the distribution. Accepted values for q are between 0.0 and 1.0. -+// Returns NaN if Count is zero or bad inputs. -+func (t *TDigest) Quantile(q float64) float64 { -+ t.process() -+ t.updateCumulative() -+ if q < 0 || q > 1 || t.processed.Len() == 0 { -+ return math.NaN() -+ } -+ if t.processed.Len() == 1 { -+ return t.processed[0].Mean -+ } -+ index := q * t.processedWeight -+ if index <= t.processed[0].Weight/2.0 { -+ return t.min + 2.0*index/t.processed[0].Weight*(t.processed[0].Mean-t.min) -+ } -+ -+ lower := sort.Search(len(t.cumulative), func(i int) bool { -+ return t.cumulative[i] >= index -+ }) -+ -+ if lower+1 != len(t.cumulative) { -+ z1 := index - t.cumulative[lower-1] -+ z2 := t.cumulative[lower] - index -+ return weightedAverage(t.processed[lower-1].Mean, z2, t.processed[lower].Mean, z1) -+ } -+ -+ z1 := index - t.processedWeight - t.processed[lower-1].Weight/2.0 -+ z2 := (t.processed[lower-1].Weight / 2.0) - z1 -+ return weightedAverage(t.processed[t.processed.Len()-1].Mean, z1, t.max, z2) -+} -+ -+// CDF returns the cumulative distribution function for a given value x. 
-+func (t *TDigest) CDF(x float64) float64 { -+ t.process() -+ t.updateCumulative() -+ switch t.processed.Len() { -+ case 0: -+ return 0.0 -+ case 1: -+ width := t.max - t.min -+ if x <= t.min { -+ return 0.0 -+ } -+ if x >= t.max { -+ return 1.0 -+ } -+ if (x - t.min) <= width { -+ // min and max are too close together to do any viable interpolation -+ return 0.5 -+ } -+ return (x - t.min) / width -+ } -+ -+ if x <= t.min { -+ return 0.0 -+ } -+ if x >= t.max { -+ return 1.0 -+ } -+ m0 := t.processed[0].Mean -+ // Left Tail -+ if x <= m0 { -+ if m0-t.min > 0 { -+ return (x - t.min) / (m0 - t.min) * t.processed[0].Weight / t.processedWeight / 2.0 -+ } -+ return 0.0 -+ } -+ // Right Tail -+ mn := t.processed[t.processed.Len()-1].Mean -+ if x >= mn { -+ if t.max-mn > 0.0 { -+ return 1.0 - (t.max-x)/(t.max-mn)*t.processed[t.processed.Len()-1].Weight/t.processedWeight/2.0 -+ } -+ return 1.0 -+ } -+ -+ upper := sort.Search(t.processed.Len(), func(i int) bool { -+ return t.processed[i].Mean > x -+ }) -+ -+ z1 := x - t.processed[upper-1].Mean -+ z2 := t.processed[upper].Mean - x -+ return weightedAverage(t.cumulative[upper-1], z2, t.cumulative[upper], z1) / t.processedWeight -+} -+ -+func (t *TDigest) integratedQ(k float64) float64 { -+ return (math.Sin(math.Min(k, t.Compression)*math.Pi/t.Compression-math.Pi/2.0) + 1.0) / 2.0 -+} -+ -+func (t *TDigest) integratedLocation(q float64) float64 { -+ return t.Compression * (math.Asin(2.0*q-1.0) + math.Pi/2.0) / math.Pi -+} -+ -+func weightedAverage(x1, w1, x2, w2 float64) float64 { -+ if x1 <= x2 { -+ return weightedAverageSorted(x1, w1, x2, w2) -+ } -+ return weightedAverageSorted(x2, w2, x1, w1) -+} -+ -+func weightedAverageSorted(x1, w1, x2, w2 float64) float64 { -+ x := (x1*w1 + x2*w2) / (w1 + w2) -+ return math.Max(x1, math.Min(x, x2)) -+} -+ -+func processedSize(size int, compression float64) int { -+ if size == 0 { -+ return int(2 * math.Ceil(compression)) -+ } -+ return size -+} -+ -+func unprocessedSize(size int, compression float64) int { -+ if size == 0 { -+ return int(8 * math.Ceil(compression)) -+ } -+ return size -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 3f52d1a420f43..e3f827d77e720 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -158,6 +158,14 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options - github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared - github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version - github.com/AzureAD/microsoft-authentication-library-for-go/apps/public -+# github.com/DataDog/sketches-go v1.4.2 -+## explicit; go 1.15 -+github.com/DataDog/sketches-go/ddsketch -+github.com/DataDog/sketches-go/ddsketch/encoding -+github.com/DataDog/sketches-go/ddsketch/mapping -+github.com/DataDog/sketches-go/ddsketch/pb/sketchpb -+github.com/DataDog/sketches-go/ddsketch/stat -+github.com/DataDog/sketches-go/ddsketch/store - # github.com/DmitriyVTitov/size v1.5.0 - ## explicit; go 1.14 - github.com/DmitriyVTitov/size -@@ -969,6 +977,9 @@ github.com/influxdata/go-syslog/v3/common - github.com/influxdata/go-syslog/v3/nontransparent - github.com/influxdata/go-syslog/v3/octetcounting - github.com/influxdata/go-syslog/v3/rfc5424 -+# github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b -+## explicit; go 1.13 -+github.com/influxdata/tdigest - # github.com/influxdata/telegraf v1.16.3 - ## explicit; go 1.15 - github.com/influxdata/telegraf",unknown,Define sketches for quantiles. 
(#10659) -7889e01cd0b59e4ad8d475735f74ae8cd47308ad,2020-07-12 03:26:32,Ed Welch,"looks like yamale 3.0.0 was released with a breaking change in a method parameter, pinning to 2.2.0 (#2341)",False,"diff --git a/.circleci/config.yml b/.circleci/config.yml -index 58a5028f7e105..5327b751e93f7 100644 ---- a/.circleci/config.yml -+++ b/.circleci/config.yml -@@ -129,7 +129,7 @@ jobs: - - run: - name: Install Chart Testing tool - command: | -- pip install yamale yamllint -+ pip install yamale==2.2.0 yamllint - curl -Lo ct.tgz https://github.com/helm/chart-testing/releases/download/v${CT_VERSION}/chart-testing_${CT_VERSION}_linux_amd64.tar.gz - sudo tar -C /usr/local/bin -xvf ct.tgz - sudo mv /usr/local/bin/etc /etc/ct/",unknown,"looks like yamale 3.0.0 was released with a breaking change in a method parameter, pinning to 2.2.0 (#2341)" -bd0ca9387da3408b6e7097fed8d5693f05cf1e8c,2025-02-13 11:38:08,Sandeep Sukhani,chore: refactor delete requests store interface to prepare for replacing boltdb with sqlite for storing delete requests (#16181),False,"diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go -index 908152a3edbbe..e19bfec2457a9 100644 ---- a/pkg/compactor/compactor.go -+++ b/pkg/compactor/compactor.go -@@ -369,6 +369,7 @@ func (c *Compactor) initDeletes(objectClient client.ObjectClient, r prometheus.R - c.DeleteRequestsHandler = deletion.NewDeleteRequestHandler( - c.deleteRequestsStore, - c.cfg.DeleteMaxInterval, -+ c.cfg.DeleteRequestCancelPeriod, - r, - ) - -diff --git a/pkg/compactor/deletion/delete_requests_manager.go b/pkg/compactor/deletion/delete_requests_manager.go -index 9391d50801b07..230a67ce79882 100644 ---- a/pkg/compactor/deletion/delete_requests_manager.go -+++ b/pkg/compactor/deletion/delete_requests_manager.go -@@ -3,9 +3,7 @@ package deletion - import ( - ""context"" - ""fmt"" -- ""slices"" - ""sort"" -- ""strings"" - ""sync"" - ""time"" - -@@ -58,7 +56,7 @@ func NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeri - - go dm.loop() - -- if err := dm.mergeShardedRequests(context.Background()); err != nil { -+ if err := dm.deleteRequestsStore.MergeShardedRequests(context.Background()); err != nil { - level.Error(util_log.Logger).Log(""msg"", ""failed to merge sharded requests"", ""err"", err) - } - -@@ -89,70 +87,8 @@ func (d *DeleteRequestsManager) Stop() { - d.wg.Wait() - } - --// mergeShardedRequests merges the sharded requests back to a single request when we are done with processing all the shards --func (d *DeleteRequestsManager) mergeShardedRequests(ctx context.Context) error { -- deleteGroups, err := d.deleteRequestsStore.GetAllDeleteRequests(context.Background()) -- if err != nil { -- return err -- } -- -- slices.SortFunc(deleteGroups, func(a, b DeleteRequest) int { -- return strings.Compare(a.RequestID, b.RequestID) -- }) -- deleteRequests := mergeDeletes(deleteGroups) -- for _, req := range deleteRequests { -- // do not consider requests which do not have an id. Request ID won't be set in some tests or there is a bug in our code for loading requests. 
-- if req.RequestID == """" { -- level.Error(util_log.Logger).Log(""msg"", ""skipped considering request without an id for merging its shards"", -- ""user_id"", req.UserID, -- ""start_time"", req.StartTime.Unix(), -- ""end_time"", req.EndTime.Unix(), -- ""query"", req.Query, -- ) -- continue -- } -- // do not do anything if we are not done with processing all the shards or the number of shards is 1 -- if req.Status != StatusProcessed { -- continue -- } -- -- var idxStart, idxEnd int -- for i := range deleteGroups { -- if req.RequestID == deleteGroups[i].RequestID { -- idxStart = i -- break -- } -- } -- -- for i := len(deleteGroups) - 1; i > 0; i-- { -- if req.RequestID == deleteGroups[i].RequestID { -- idxEnd = i -- break -- } -- } -- -- // do not do anything if the number of shards is 1 -- if idxStart == idxEnd { -- continue -- } -- reqShards := deleteGroups[idxStart : idxEnd+1] -- -- level.Info(util_log.Logger).Log(""msg"", ""merging sharded request"", -- ""request_id"", req.RequestID, -- ""num_shards"", len(reqShards), -- ""start_time"", req.StartTime.Unix(), -- ""end_time"", req.EndTime.Unix(), -- ) -- if err := d.deleteRequestsStore.MergeShardedRequests(ctx, req, reqShards); err != nil { -- return err -- } -- } -- -- return nil --} -- - func (d *DeleteRequestsManager) updateMetrics() error { -- deleteRequests, err := d.deleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived) -+ deleteRequests, err := d.deleteRequestsStore.GetUnprocessedShards(context.Background()) - if err != nil { - return err - } -@@ -267,7 +203,7 @@ func (d *DeleteRequestsManager) loadDeleteRequestsToProcess() error { - } - - func (d *DeleteRequestsManager) filteredSortedDeleteRequests() ([]DeleteRequest, error) { -- deleteRequests, err := d.deleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived) -+ deleteRequests, err := d.deleteRequestsStore.GetUnprocessedShards(context.Background()) - if err != nil { - return nil, err - } -@@ -418,7 +354,7 @@ func (d *DeleteRequestsManager) MarkPhaseTimedOut() { - } - - func (d *DeleteRequestsManager) markRequestAsProcessed(deleteRequest DeleteRequest) { -- if err := d.deleteRequestsStore.UpdateStatus(context.Background(), deleteRequest, StatusProcessed); err != nil { -+ if err := d.deleteRequestsStore.MarkShardAsProcessed(context.Background(), deleteRequest); err != nil { - level.Error(util_log.Logger).Log( - ""msg"", ""failed to mark delete request for user as processed"", - ""delete_request_id"", deleteRequest.RequestID, -@@ -462,7 +398,7 @@ func (d *DeleteRequestsManager) MarkPhaseFinished() { - d.markRequestAsProcessed(req) - } - -- if err := d.mergeShardedRequests(context.Background()); err != nil { -+ if err := d.deleteRequestsStore.MergeShardedRequests(context.Background()); err != nil { - level.Error(util_log.Logger).Log(""msg"", ""failed to merge sharded requests"", ""err"", err) - } - } -diff --git a/pkg/compactor/deletion/delete_requests_manager_test.go b/pkg/compactor/deletion/delete_requests_manager_test.go -index e4b06f7e551b6..a3be10a136dcd 100644 ---- a/pkg/compactor/deletion/delete_requests_manager_test.go -+++ b/pkg/compactor/deletion/delete_requests_manager_test.go -@@ -981,7 +981,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) { - - mgr.MarkPhaseFinished() - -- processedRequests, err := mockDeleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusProcessed) -+ processedRequests, err := mockDeleteRequestsStore.getDeleteRequestsByStatus(StatusProcessed) - require.NoError(t, err) 
- require.Len(t, processedRequests, len(tc.expectedRequestsMarkedAsProcessed)) - -@@ -1018,14 +1018,24 @@ func TestDeleteRequestsManager_IntervalMayHaveExpiredChunks(t *testing.T) { - } - } - -+type storeAddReqDetails struct { -+ userID, query string -+ startTime, endTime model.Time -+ shardByInterval time.Duration -+} -+ -+type removeReqDetails struct { -+ userID, reqID string -+} -+ - type mockDeleteRequestsStore struct { - DeleteRequestsStore - deleteRequests []DeleteRequest -- addReqs []DeleteRequest -+ addReq storeAddReqDetails - addErr error - returnZeroDeleteRequests bool - -- removeReqs []DeleteRequest -+ removeReqs removeReqDetails - removeErr error - - getUser string -@@ -1040,7 +1050,11 @@ type mockDeleteRequestsStore struct { - genNumber string - } - --func (m *mockDeleteRequestsStore) GetDeleteRequestsByStatus(_ context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { -+func (m *mockDeleteRequestsStore) GetUnprocessedShards(_ context.Context) ([]DeleteRequest, error) { -+ return m.getDeleteRequestsByStatus(StatusReceived) -+} -+ -+func (m *mockDeleteRequestsStore) getDeleteRequestsByStatus(status DeleteRequestStatus) ([]DeleteRequest, error) { - reqs := make([]DeleteRequest, 0, len(m.deleteRequests)) - for i := range m.deleteRequests { - if m.deleteRequests[i].Status == status { -@@ -1050,27 +1064,36 @@ func (m *mockDeleteRequestsStore) GetDeleteRequestsByStatus(_ context.Context, s - return reqs, nil - } - --func (m *mockDeleteRequestsStore) GetAllDeleteRequests(_ context.Context) ([]DeleteRequest, error) { -+func (m *mockDeleteRequestsStore) GetAllRequests(_ context.Context) ([]DeleteRequest, error) { - return m.deleteRequests, nil - } - --func (m *mockDeleteRequestsStore) AddDeleteRequestGroup(_ context.Context, reqs []DeleteRequest) ([]DeleteRequest, error) { -- m.addReqs = reqs -- if m.returnZeroDeleteRequests { -- return []DeleteRequest{}, m.addErr -+func (m *mockDeleteRequestsStore) AddDeleteRequest(_ context.Context, userID, query string, startTime, endTime model.Time, shardByInterval time.Duration) (string, error) { -+ m.addReq = storeAddReqDetails{ -+ userID: userID, -+ query: query, -+ startTime: startTime, -+ endTime: endTime, -+ shardByInterval: shardByInterval, - } -- return m.addReqs, m.addErr -+ return """", m.addErr - } - --func (m *mockDeleteRequestsStore) RemoveDeleteRequests(_ context.Context, reqs []DeleteRequest) error { -- m.removeReqs = reqs -+func (m *mockDeleteRequestsStore) RemoveDeleteRequest(_ context.Context, userID string, requestID string) error { -+ m.removeReqs = removeReqDetails{ -+ userID: userID, -+ reqID: requestID, -+ } - return m.removeErr - } - --func (m *mockDeleteRequestsStore) GetDeleteRequestGroup(_ context.Context, userID, requestID string) ([]DeleteRequest, error) { -+func (m *mockDeleteRequestsStore) GetDeleteRequest(_ context.Context, userID, requestID string) (DeleteRequest, error) { - m.getUser = userID - m.getID = requestID -- return m.getResult, m.getErr -+ if m.getErr != nil { -+ return DeleteRequest{}, m.getErr -+ } -+ return m.getResult[0], m.getErr - } - - func (m *mockDeleteRequestsStore) GetAllDeleteRequestsForUser(_ context.Context, userID string) ([]DeleteRequest, error) { -@@ -1082,32 +1105,17 @@ func (m *mockDeleteRequestsStore) GetCacheGenerationNumber(_ context.Context, _ - return m.genNumber, m.getErr - } - --func (m *mockDeleteRequestsStore) UpdateStatus(_ context.Context, req DeleteRequest, newStatus DeleteRequestStatus) error { -+func (m *mockDeleteRequestsStore) MarkShardAsProcessed(_ 
context.Context, req DeleteRequest) error { - for i := range m.deleteRequests { - if requestsAreEqual(m.deleteRequests[i], req) { -- m.deleteRequests[i].Status = newStatus -+ m.deleteRequests[i].Status = StatusProcessed - } - } - - return nil - } - --func (m *mockDeleteRequestsStore) MergeShardedRequests(_ context.Context, requestToAdd DeleteRequest, requestsToRemove []DeleteRequest) error { -- n := 0 -- for i := range m.deleteRequests { -- for j := range requestsToRemove { -- if requestsAreEqual(m.deleteRequests[i], requestsToRemove[j]) { -- continue -- } -- m.deleteRequests[n] = m.deleteRequests[i] -- n++ -- break -- } -- } -- -- m.deleteRequests = m.deleteRequests[:n] -- m.deleteRequests = append(m.deleteRequests, requestToAdd) -- -+func (m *mockDeleteRequestsStore) MergeShardedRequests(_ context.Context) error { - return nil - } - -@@ -1124,86 +1132,6 @@ func requestsAreEqual(req1, req2 DeleteRequest) bool { - return false - } - --func TestDeleteRequestsManager_mergeShardedRequests(t *testing.T) { -- for _, tc := range []struct { -- name string -- reqsToAdd []DeleteRequest -- shouldMarkProcessed func(DeleteRequest) bool -- requestsShouldBeMerged bool -- }{ -- { -- name: ""no requests in store"", -- }, -- { -- name: ""none of the requests are processed - should not merge"", -- reqsToAdd: buildRequests(time.Hour, `{foo=""bar""}`, user1, now.Add(-24*time.Hour), now), -- shouldMarkProcessed: func(_ DeleteRequest) bool { -- return false -- }, -- }, -- { -- name: ""not all requests are processed - should not merge"", -- reqsToAdd: buildRequests(time.Hour, `{foo=""bar""}`, user1, now.Add(-24*time.Hour), now), -- shouldMarkProcessed: func(request DeleteRequest) bool { -- return request.SequenceNum%2 == 0 -- }, -- }, -- { -- name: ""all the requests are processed - should merge"", -- reqsToAdd: buildRequests(time.Hour, `{foo=""bar""}`, user1, now.Add(-24*time.Hour), now), -- shouldMarkProcessed: func(_ DeleteRequest) bool { -- return true -- }, -- requestsShouldBeMerged: true, -- }, -- { // build requests for 2 different users and mark all requests as processed for just one of the two -- name: ""merging requests from one user should not touch another users requests"", -- reqsToAdd: append( -- buildRequests(time.Hour, `{foo=""bar""}`, user1, now.Add(-24*time.Hour), now), -- buildRequests(time.Hour, `{foo=""bar""}`, user2, now.Add(-24*time.Hour), now)..., -- ), -- shouldMarkProcessed: func(request DeleteRequest) bool { -- return request.UserID == user2 -- }, -- }, -- } { -- t.Run(tc.name, func(t *testing.T) { -- mgr := setupManager(t) -- reqs, err := mgr.deleteRequestsStore.AddDeleteRequestGroup(context.Background(), tc.reqsToAdd) -- require.NoError(t, err) -- require.GreaterOrEqual(t, len(reqs), len(tc.reqsToAdd)) -- -- for _, req := range reqs { -- if !tc.shouldMarkProcessed(req) { -- continue -- } -- require.NoError(t, mgr.deleteRequestsStore.UpdateStatus(context.Background(), req, StatusProcessed)) -- } -- -- inStoreReqs, err := mgr.deleteRequestsStore.GetAllDeleteRequestsForUser(context.Background(), user1) -- require.NoError(t, err) -- -- require.NoError(t, mgr.mergeShardedRequests(context.Background())) -- inStoreReqsAfterMerging, err := mgr.deleteRequestsStore.GetAllDeleteRequestsForUser(context.Background(), user1) -- require.NoError(t, err) -- -- if tc.requestsShouldBeMerged { -- require.Len(t, inStoreReqsAfterMerging, 1) -- require.True(t, requestsAreEqual(inStoreReqsAfterMerging[0], DeleteRequest{ -- UserID: user1, -- Query: tc.reqsToAdd[0].Query, -- StartTime: 
tc.reqsToAdd[0].StartTime, -- EndTime: tc.reqsToAdd[len(tc.reqsToAdd)-1].EndTime, -- Status: StatusProcessed, -- })) -- } else { -- require.Len(t, inStoreReqsAfterMerging, len(inStoreReqs)) -- require.Equal(t, inStoreReqs, inStoreReqsAfterMerging) -- } -- }) -- } --} -- - func setupManager(t *testing.T) *DeleteRequestsManager { - t.Helper() - // build the store -diff --git a/pkg/compactor/deletion/delete_requests_store.go b/pkg/compactor/deletion/delete_requests_store.go -index 405ffef08cb64..edaecd87cae47 100644 ---- a/pkg/compactor/deletion/delete_requests_store.go -+++ b/pkg/compactor/deletion/delete_requests_store.go -@@ -7,18 +7,20 @@ import ( - ""errors"" - ""fmt"" - ""hash/fnv"" -+ ""slices"" - ""sort"" - ""strconv"" - ""strings"" - ""time"" - ""unsafe"" - -+ ""github.com/go-kit/log/level"" - ""github.com/grafana/dskit/user"" -- - ""github.com/prometheus/common/model"" - - ""github.com/grafana/loki/v3/pkg/storage/stores/series/index"" - ""github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage"" -+ util_log ""github.com/grafana/loki/v3/pkg/util/log"" - ) - - type ( -@@ -41,15 +43,19 @@ const ( - var ErrDeleteRequestNotFound = errors.New(""could not find matching delete requests"") - - type DeleteRequestsStore interface { -- AddDeleteRequestGroup(ctx context.Context, req []DeleteRequest) ([]DeleteRequest, error) -- GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) -- GetAllDeleteRequests(ctx context.Context) ([]DeleteRequest, error) -+ AddDeleteRequest(ctx context.Context, userID, query string, startTime, endTime model.Time, shardByInterval time.Duration) (string, error) -+ GetAllRequests(ctx context.Context) ([]DeleteRequest, error) - GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) -- UpdateStatus(ctx context.Context, req DeleteRequest, newStatus DeleteRequestStatus) error -- GetDeleteRequestGroup(ctx context.Context, userID, requestID string) ([]DeleteRequest, error) -- RemoveDeleteRequests(ctx context.Context, req []DeleteRequest) error -+ RemoveDeleteRequest(ctx context.Context, userID string, requestID string) error -+ GetDeleteRequest(ctx context.Context, userID, requestID string) (DeleteRequest, error) - GetCacheGenerationNumber(ctx context.Context, userID string) (string, error) -- MergeShardedRequests(ctx context.Context, requestToAdd DeleteRequest, requestsToRemove []DeleteRequest) error -+ MergeShardedRequests(ctx context.Context) error -+ -+ // ToDo(Sandeep): To keep changeset smaller, below 2 methods treat a single shard as individual request. This can be refactored later in a separate PR. -+ MarkShardAsProcessed(ctx context.Context, req DeleteRequest) error -+ GetUnprocessedShards(ctx context.Context) ([]DeleteRequest, error) -+ GetAllShards(ctx context.Context) ([]DeleteRequest, error) -+ - Stop() - Name() string - } -@@ -77,25 +83,25 @@ func (ds *deleteRequestsStore) Stop() { - ds.indexClient.Stop() - } - --// AddDeleteRequestGroup creates entries for new delete requests. All passed delete requests will be associated to -+// AddDeleteRequest creates entries for new delete requests. 
All passed delete requests will be associated to - // each other by request id --func (ds *deleteRequestsStore) AddDeleteRequestGroup(ctx context.Context, reqs []DeleteRequest) ([]DeleteRequest, error) { -+func (ds *deleteRequestsStore) AddDeleteRequest(ctx context.Context, userID, query string, startTime, endTime model.Time, shardByInterval time.Duration) (string, error) { -+ reqs := buildRequests(shardByInterval, query, userID, startTime, endTime) - if len(reqs) == 0 { -- return nil, nil -+ return """", fmt.Errorf(""zero delete requests created"") - } -- - createdAt := ds.now() - writeBatch := ds.indexClient.NewWriteBatch() - requestID, err := ds.generateID(ctx, reqs[0]) - if err != nil { -- return nil, err -+ return """", err - } - - var results []DeleteRequest - for i, req := range reqs { - newReq, err := newRequest(req, requestID, createdAt, i) - if err != nil { -- return nil, err -+ return """", err - } - - results = append(results, newReq) -@@ -104,13 +110,13 @@ func (ds *deleteRequestsStore) AddDeleteRequestGroup(ctx context.Context, reqs [ - ds.updateCacheGen(reqs[0].UserID, writeBatch) - - if err := ds.indexClient.BatchWrite(ctx, writeBatch); err != nil { -- return nil, err -+ return """", err - } - -- return results, nil -+ return requestID, nil - } - --func (ds *deleteRequestsStore) MergeShardedRequests(ctx context.Context, requestToAdd DeleteRequest, requestsToRemove []DeleteRequest) error { -+func (ds *deleteRequestsStore) mergeShardedRequests(ctx context.Context, requestToAdd DeleteRequest, requestsToRemove []DeleteRequest) error { - writeBatch := ds.indexClient.NewWriteBatch() - - ds.writeDeleteRequest(requestToAdd, writeBatch) -@@ -122,8 +128,8 @@ func (ds *deleteRequestsStore) MergeShardedRequests(ctx context.Context, request - return ds.indexClient.BatchWrite(ctx, writeBatch) - } - --func newRequest(req DeleteRequest, requestID []byte, createdAt model.Time, seqNumber int) (DeleteRequest, error) { -- req.RequestID = string(requestID) -+func newRequest(req DeleteRequest, requestID string, createdAt model.Time, seqNumber int) (DeleteRequest, error) { -+ req.RequestID = requestID - req.Status = StatusReceived - req.CreatedAt = createdAt - req.SequenceNum = int64(seqNumber) -@@ -162,15 +168,15 @@ func backwardCompatibleDeleteRequestHash(userID, requestID string, sequenceNumbe - return fmt.Sprintf(""%s:%s:%d"", userID, requestID, sequenceNumber) - } - --func (ds *deleteRequestsStore) generateID(ctx context.Context, req DeleteRequest) ([]byte, error) { -+func (ds *deleteRequestsStore) generateID(ctx context.Context, req DeleteRequest) (string, error) { - requestID := generateUniqueID(req.UserID, req.Query) - - for { -- if _, err := ds.GetDeleteRequestGroup(ctx, req.UserID, string(requestID)); err != nil { -- if err == ErrDeleteRequestNotFound { -+ if _, err := ds.GetDeleteRequest(ctx, req.UserID, requestID); err != nil { -+ if errors.Is(err, ErrDeleteRequestNotFound) { - return requestID, nil - } -- return nil, err -+ return """", err - } - - // we have a collision here, lets recreate a new requestID and check for collision -@@ -179,44 +185,77 @@ func (ds *deleteRequestsStore) generateID(ctx context.Context, req DeleteRequest - } - } - --// GetDeleteRequestsByStatus returns all delete requests for given status. --func (ds *deleteRequestsStore) GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { -+// GetUnprocessedShards returns all the unprocessed shards as individual delete requests. 
-+func (ds *deleteRequestsStore) GetUnprocessedShards(ctx context.Context) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, index.Query{ - TableName: DeleteRequestsTableName, - HashValue: string(deleteRequestID), -- ValueEqual: []byte(status), -+ ValueEqual: []byte(StatusReceived), - }) - } - --// GetAllDeleteRequests returns all the delete requests. --func (ds *deleteRequestsStore) GetAllDeleteRequests(ctx context.Context) ([]DeleteRequest, error) { -+// GetAllShards returns all the shards as individual delete requests. -+func (ds *deleteRequestsStore) GetAllShards(ctx context.Context) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, index.Query{ - TableName: DeleteRequestsTableName, - HashValue: string(deleteRequestID), - }) - } - -+// GetAllRequests returns all the delete requests. -+func (ds *deleteRequestsStore) GetAllRequests(ctx context.Context) ([]DeleteRequest, error) { -+ deleteGroups, err := ds.GetAllShards(ctx) -+ if err != nil { -+ return nil, err -+ } -+ -+ deleteRequests := mergeDeletes(deleteGroups) -+ return deleteRequests, nil -+} -+ - // GetAllDeleteRequestsForUser returns all delete requests for a user. - func (ds *deleteRequestsStore) GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { -- return ds.queryDeleteRequests(ctx, index.Query{ -+ deleteGroups, err := ds.queryDeleteRequests(ctx, index.Query{ - TableName: DeleteRequestsTableName, - HashValue: string(deleteRequestID), - RangeValuePrefix: []byte(userID), - }) -+ if err != nil { -+ return nil, err -+ } -+ -+ deleteRequests := mergeDeletes(deleteGroups) -+ return deleteRequests, nil - } - --// UpdateStatus updates status of a delete request. --func (ds *deleteRequestsStore) UpdateStatus(ctx context.Context, req DeleteRequest, newStatus DeleteRequestStatus) error { -+// MarkShardAsProcessed marks a delete request shard as processed. -+func (ds *deleteRequestsStore) MarkShardAsProcessed(ctx context.Context, req DeleteRequest) error { - userIDAndRequestID := backwardCompatibleDeleteRequestHash(req.UserID, req.RequestID, req.SequenceNum) - - writeBatch := ds.indexClient.NewWriteBatch() -- writeBatch.Add(DeleteRequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID), []byte(newStatus)) -+ writeBatch.Add(DeleteRequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID), []byte(StatusProcessed)) - - return ds.indexClient.BatchWrite(ctx, writeBatch) - } - --// GetDeleteRequestGroup returns delete requests with given requestID. --func (ds *deleteRequestsStore) GetDeleteRequestGroup(ctx context.Context, userID, requestID string) ([]DeleteRequest, error) { -+// GetDeleteRequest finds and returns delete request with given ID. -+func (ds *deleteRequestsStore) GetDeleteRequest(ctx context.Context, userID, requestID string) (DeleteRequest, error) { -+ reqGroup, err := ds.getDeleteRequestGroup(ctx, userID, requestID) -+ if err != nil { -+ return DeleteRequest{}, err -+ } -+ -+ startTime, endTime, status := mergeData(reqGroup) -+ deleteRequest := reqGroup[0] -+ deleteRequest.StartTime = startTime -+ deleteRequest.EndTime = endTime -+ deleteRequest.Status = status -+ -+ return deleteRequest, nil -+} -+ -+// getDeleteRequestGroup returns delete requests with given requestID. 
-+func (ds *deleteRequestsStore) getDeleteRequestGroup(ctx context.Context, userID, requestID string) ([]DeleteRequest, error) { - userIDAndRequestID := fmt.Sprintf(""%s:%s"", userID, requestID) - - deleteRequests, err := ds.queryDeleteRequests(ctx, index.Query{ -@@ -341,8 +380,13 @@ func unmarshalDeleteRequestDetails(itr index.ReadBatchIterator, req DeleteReques - return requestWithDetails, nil - } - --// RemoveDeleteRequests the passed delete requests --func (ds *deleteRequestsStore) RemoveDeleteRequests(ctx context.Context, reqs []DeleteRequest) error { -+// RemoveDeleteRequest removes the passed delete request -+func (ds *deleteRequestsStore) RemoveDeleteRequest(ctx context.Context, userID, requestID string) error { -+ reqs, err := ds.getDeleteRequestGroup(ctx, userID, requestID) -+ if err != nil { -+ return err -+ } -+ - if len(reqs) == 0 { - return nil - } -@@ -369,6 +413,68 @@ func (ds *deleteRequestsStore) Name() string { - return ""delete_requests_store"" - } - -+// MergeShardedRequests merges the sharded requests back to a single request when we are done with processing all the shards -+func (ds *deleteRequestsStore) MergeShardedRequests(ctx context.Context) error { -+ deleteGroups, err := ds.GetAllShards(context.Background()) -+ if err != nil { -+ return err -+ } -+ -+ slices.SortFunc(deleteGroups, func(a, b DeleteRequest) int { -+ return strings.Compare(a.RequestID, b.RequestID) -+ }) -+ deleteRequests := mergeDeletes(deleteGroups) -+ for _, req := range deleteRequests { -+ // do not consider requests which do not have an id. Request ID won't be set in some tests or there is a bug in our code for loading requests. -+ if req.RequestID == """" { -+ level.Error(util_log.Logger).Log(""msg"", ""skipped considering request without an id for merging its shards"", -+ ""user_id"", req.UserID, -+ ""start_time"", req.StartTime.Unix(), -+ ""end_time"", req.EndTime.Unix(), -+ ""query"", req.Query, -+ ) -+ continue -+ } -+ // do not do anything if we are not done with processing all the shards or the number of shards is 1 -+ if req.Status != StatusProcessed { -+ continue -+ } -+ -+ var idxStart, idxEnd int -+ for i := range deleteGroups { -+ if req.RequestID == deleteGroups[i].RequestID { -+ idxStart = i -+ break -+ } -+ } -+ -+ for i := len(deleteGroups) - 1; i > 0; i-- { -+ if req.RequestID == deleteGroups[i].RequestID { -+ idxEnd = i -+ break -+ } -+ } -+ -+ // do not do anything if the number of shards is 1 -+ if idxStart == idxEnd { -+ continue -+ } -+ reqShards := deleteGroups[idxStart : idxEnd+1] -+ -+ level.Info(util_log.Logger).Log(""msg"", ""merging sharded request"", -+ ""request_id"", req.RequestID, -+ ""num_shards"", len(reqShards), -+ ""start_time"", req.StartTime.Unix(), -+ ""end_time"", req.EndTime.Unix(), -+ ) -+ if err := ds.mergeShardedRequests(ctx, req, reqShards); err != nil { -+ return err -+ } -+ } -+ -+ return nil -+} -+ - func parseDeleteRequestTimestamps(rangeValue []byte, deleteRequest DeleteRequest) (DeleteRequest, error) { - hexParts := strings.Split(string(rangeValue), "":"") - if len(hexParts) != 3 { -@@ -397,7 +503,7 @@ func parseDeleteRequestTimestamps(rangeValue []byte, deleteRequest DeleteRequest - } - - // An id is useful in managing delete requests --func generateUniqueID(orgID string, query string) []byte { -+func generateUniqueID(orgID string, query string) string { - uniqueID := fnv.New32() - _, _ = uniqueID.Write([]byte(orgID)) - -@@ -407,7 +513,7 @@ func generateUniqueID(orgID string, query string) []byte { - - _, _ = uniqueID.Write([]byte(query)) - 
-- return encodeUniqueID(uniqueID.Sum32()) -+ return string(encodeUniqueID(uniqueID.Sum32())) - } - - func encodeUniqueID(t uint32) []byte { -diff --git a/pkg/compactor/deletion/delete_requests_store_test.go b/pkg/compactor/deletion/delete_requests_store_test.go -index 62e6012ce02ad..6931b808f7a71 100644 ---- a/pkg/compactor/deletion/delete_requests_store_test.go -+++ b/pkg/compactor/deletion/delete_requests_store_test.go -@@ -21,23 +21,31 @@ func TestDeleteRequestsStore(t *testing.T) { - - // add requests for both the users to the store - for i := 0; i < len(tc.user1Requests); i++ { -- resp, err := tc.store.AddDeleteRequestGroup( -+ resp, err := tc.store.AddDeleteRequest( - context.Background(), -- []DeleteRequest{tc.user1Requests[i]}, -+ tc.user1Requests[i].UserID, -+ tc.user1Requests[i].Query, -+ tc.user1Requests[i].StartTime, -+ tc.user1Requests[i].EndTime, -+ 0, - ) - require.NoError(t, err) -- tc.user1Requests[i] = resp[0] -+ tc.user1Requests[i].RequestID = resp - -- resp, err = tc.store.AddDeleteRequestGroup( -+ resp, err = tc.store.AddDeleteRequest( - context.Background(), -- []DeleteRequest{tc.user2Requests[i]}, -+ tc.user2Requests[i].UserID, -+ tc.user2Requests[i].Query, -+ tc.user2Requests[i].StartTime, -+ tc.user2Requests[i].EndTime, -+ 0, - ) - require.NoError(t, err) -- tc.user2Requests[i] = resp[0] -+ tc.user2Requests[i].RequestID = resp - } - - // get all requests with StatusReceived and see if they have expected values -- deleteRequests, err := tc.store.GetDeleteRequestsByStatus(context.Background(), StatusReceived) -+ deleteRequests, err := tc.store.GetUnprocessedShards(context.Background()) - require.NoError(t, err) - compareRequests(t, append(tc.user1Requests, tc.user2Requests...), deleteRequests) - -@@ -60,14 +68,13 @@ func TestDeleteRequestsStore(t *testing.T) { - - // get individual delete requests by id and see if they have expected values - for _, expectedRequest := range append(user1Requests, user2Requests...) 
{ -- actualRequest, err := tc.store.GetDeleteRequestGroup(context.Background(), expectedRequest.UserID, expectedRequest.RequestID) -+ actualRequest, err := tc.store.GetDeleteRequest(context.Background(), expectedRequest.UserID, expectedRequest.RequestID) - require.NoError(t, err) -- require.Len(t, actualRequest, 1) -- require.Equal(t, expectedRequest, actualRequest[0]) -+ require.Equal(t, expectedRequest, actualRequest) - } - - // try a non-existent request and see if it throws ErrDeleteRequestNotFound -- _, err = tc.store.GetDeleteRequestGroup(context.Background(), ""user3"", ""na"") -+ _, err = tc.store.GetDeleteRequest(context.Background(), ""user3"", ""na"") - require.ErrorIs(t, err, ErrDeleteRequestNotFound) - - // update some of the delete requests for both the users to processed -@@ -81,7 +88,7 @@ func TestDeleteRequestsStore(t *testing.T) { - request = tc.user2Requests[i] - } - -- require.NoError(t, tc.store.UpdateStatus(context.Background(), request, StatusProcessed)) -+ require.NoError(t, tc.store.MarkShardAsProcessed(context.Background(), request)) - } - - // see if requests in the store have right values -@@ -116,11 +123,11 @@ func TestDeleteRequestsStore(t *testing.T) { - remainingRequests = append(remainingRequests, tc.user1Requests[i]) - } - -- require.NoError(t, tc.store.RemoveDeleteRequests(context.Background(), []DeleteRequest{request})) -+ require.NoError(t, tc.store.RemoveDeleteRequest(context.Background(), request.UserID, request.RequestID)) - } - - // see if the store has the right remaining requests -- deleteRequests, err = tc.store.GetDeleteRequestsByStatus(context.Background(), StatusReceived) -+ deleteRequests, err = tc.store.GetUnprocessedShards(context.Background()) - require.NoError(t, err) - compareRequests(t, remainingRequests, deleteRequests) - -@@ -138,42 +145,37 @@ func TestBatchCreateGet(t *testing.T) { - tc := setup(t) - defer tc.store.Stop() - -- requests, err := tc.store.AddDeleteRequestGroup(context.Background(), tc.user1Requests) -+ reqID, err := tc.store.AddDeleteRequest(context.Background(), user1, `{foo=""bar""}`, now.Add(-24*time.Hour), now, time.Hour) -+ require.NoError(t, err) -+ -+ requests, err := tc.store.getDeleteRequestGroup(context.Background(), user1, reqID) - require.NoError(t, err) - - for i, req := range requests { - require.Equal(t, req.RequestID, requests[0].RequestID) - require.Equal(t, req.Status, requests[0].Status) - require.Equal(t, req.CreatedAt, requests[0].CreatedAt) -+ require.Equal(t, req.Query, requests[0].Query) -+ require.Equal(t, req.UserID, requests[0].UserID) - - require.Equal(t, req.SequenceNum, int64(i)) - } - }) - -- t.Run(""returns all the requests that share a request id"", func(t *testing.T) { -+ t.Run(""updates a single request with a new status"", func(t *testing.T) { - tc := setup(t) - defer tc.store.Stop() - -- savedRequests, err := tc.store.AddDeleteRequestGroup(context.Background(), tc.user1Requests) -+ reqID, err := tc.store.AddDeleteRequest(context.Background(), user1, `{foo=""bar""}`, now.Add(-24*time.Hour), now, time.Hour) - require.NoError(t, err) - -- results, err := tc.store.GetDeleteRequestGroup(context.Background(), savedRequests[0].UserID, savedRequests[0].RequestID) -+ savedRequests, err := tc.store.getDeleteRequestGroup(context.Background(), user1, reqID) - require.NoError(t, err) - -- compareRequests(t, savedRequests, results) -- }) -- -- t.Run(""updates a single request with a new status"", func(t *testing.T) { -- tc := setup(t) -- defer tc.store.Stop() -- -- savedRequests, err := 
tc.store.AddDeleteRequestGroup(context.Background(), tc.user1Requests) -- require.NoError(t, err) -- -- err = tc.store.UpdateStatus(context.Background(), savedRequests[1], StatusProcessed) -+ err = tc.store.MarkShardAsProcessed(context.Background(), savedRequests[1]) - require.NoError(t, err) - -- results, err := tc.store.GetDeleteRequestGroup(context.Background(), savedRequests[0].UserID, savedRequests[0].RequestID) -+ results, err := tc.store.getDeleteRequestGroup(context.Background(), savedRequests[0].UserID, savedRequests[0].RequestID) - require.NoError(t, err) - - require.Equal(t, StatusProcessed, results[1].Status) -@@ -183,18 +185,149 @@ func TestBatchCreateGet(t *testing.T) { - tc := setup(t) - defer tc.store.Stop() - -- savedRequests, err := tc.store.AddDeleteRequestGroup(context.Background(), tc.user1Requests) -+ reqID, err := tc.store.AddDeleteRequest(context.Background(), user1, `{foo=""bar""}`, now.Add(-24*time.Hour), now, time.Hour) - require.NoError(t, err) - -- err = tc.store.RemoveDeleteRequests(context.Background(), savedRequests) -+ err = tc.store.RemoveDeleteRequest(context.Background(), user1, reqID) - require.NoError(t, err) - -- results, err := tc.store.GetDeleteRequestGroup(context.Background(), savedRequests[0].UserID, savedRequests[0].RequestID) -+ results, err := tc.store.GetDeleteRequest(context.Background(), user1, reqID) - require.ErrorIs(t, err, ErrDeleteRequestNotFound) - require.Empty(t, results) - }) - } - -+func TestDeleteRequestsStore_MergeShardedRequests(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ reqsToAdd []storeAddReqDetails -+ shouldMarkProcessed func(DeleteRequest) bool -+ requestsShouldBeMerged bool -+ }{ -+ { -+ name: ""no requests in store"", -+ }, -+ { -+ name: ""none of the requests are processed - should not merge"", -+ reqsToAdd: []storeAddReqDetails{ -+ { -+ userID: user1, -+ query: `{foo=""bar""}`, -+ startTime: now.Add(-24 * time.Hour), -+ endTime: now, -+ shardByInterval: time.Hour, -+ }, -+ }, -+ shouldMarkProcessed: func(_ DeleteRequest) bool { -+ return false -+ }, -+ }, -+ { -+ name: ""not all requests are processed - should not merge"", -+ reqsToAdd: []storeAddReqDetails{ -+ { -+ userID: user1, -+ query: `{foo=""bar""}`, -+ startTime: now.Add(-24 * time.Hour), -+ endTime: now, -+ shardByInterval: time.Hour, -+ }, -+ }, -+ shouldMarkProcessed: func(request DeleteRequest) bool { -+ return request.SequenceNum%2 == 0 -+ }, -+ }, -+ { -+ name: ""all the requests are processed - should merge"", -+ reqsToAdd: []storeAddReqDetails{ -+ { -+ userID: user1, -+ query: `{foo=""bar""}`, -+ startTime: now.Add(-24 * time.Hour), -+ endTime: now, -+ shardByInterval: time.Hour, -+ }, -+ }, -+ shouldMarkProcessed: func(_ DeleteRequest) bool { -+ return true -+ }, -+ requestsShouldBeMerged: true, -+ }, -+ { // build requests for 2 different users and mark all requests as processed for just one of the two -+ name: ""merging requests from one user should not touch another users requests"", -+ reqsToAdd: []storeAddReqDetails{ -+ { -+ userID: user1, -+ query: `{foo=""bar""}`, -+ startTime: now.Add(-24 * time.Hour), -+ endTime: now, -+ shardByInterval: time.Hour, -+ }, -+ { -+ userID: user2, -+ query: `{foo=""bar""}`, -+ startTime: now.Add(-24 * time.Hour), -+ endTime: now, -+ shardByInterval: time.Hour, -+ }, -+ }, -+ shouldMarkProcessed: func(request DeleteRequest) bool { -+ return request.UserID == user2 -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ tempDir := t.TempDir() -+ -+ workingDir := filepath.Join(tempDir, 
""working-dir"") -+ objectStorePath := filepath.Join(tempDir, ""object-store"") -+ -+ objectClient, err := local.NewFSObjectClient(local.FSConfig{ -+ Directory: objectStorePath, -+ }) -+ require.NoError(t, err) -+ ds, err := NewDeleteStore(workingDir, storage.NewIndexStorageClient(objectClient, """")) -+ require.NoError(t, err) -+ -+ for _, addReqDetails := range tc.reqsToAdd { -+ _, err := ds.AddDeleteRequest(context.Background(), addReqDetails.userID, addReqDetails.query, addReqDetails.startTime, addReqDetails.endTime, addReqDetails.shardByInterval) -+ require.NoError(t, err) -+ } -+ -+ reqs, err := ds.GetAllShards(context.Background()) -+ require.NoError(t, err) -+ -+ for _, req := range reqs { -+ if !tc.shouldMarkProcessed(req) { -+ continue -+ } -+ require.NoError(t, ds.MarkShardAsProcessed(context.Background(), req)) -+ } -+ -+ inStoreReqs, err := ds.GetAllDeleteRequestsForUser(context.Background(), user1) -+ require.NoError(t, err) -+ -+ require.NoError(t, ds.MergeShardedRequests(context.Background())) -+ inStoreReqsAfterMerging, err := ds.GetAllDeleteRequestsForUser(context.Background(), user1) -+ require.NoError(t, err) -+ -+ if tc.requestsShouldBeMerged { -+ require.Len(t, inStoreReqsAfterMerging, 1) -+ require.True(t, requestsAreEqual(inStoreReqsAfterMerging[0], DeleteRequest{ -+ UserID: user1, -+ Query: tc.reqsToAdd[0].query, -+ StartTime: tc.reqsToAdd[0].startTime, -+ EndTime: tc.reqsToAdd[len(tc.reqsToAdd)-1].endTime, -+ Status: StatusProcessed, -+ })) -+ } else { -+ require.Len(t, inStoreReqsAfterMerging, len(inStoreReqs)) -+ require.Equal(t, inStoreReqs, inStoreReqsAfterMerging) -+ } -+ }) -+ } -+} -+ - func compareRequests(t *testing.T, expected []DeleteRequest, actual []DeleteRequest) { - require.Len(t, actual, len(expected)) - sort.Slice(expected, func(i, j int) bool { -diff --git a/pkg/compactor/deletion/delete_requests_table.go b/pkg/compactor/deletion/delete_requests_table.go -index 528a3772641e2..970f8a88195cd 100644 ---- a/pkg/compactor/deletion/delete_requests_table.go -+++ b/pkg/compactor/deletion/delete_requests_table.go -@@ -33,7 +33,10 @@ type deleteRequestsTable struct { - wg sync.WaitGroup - } - --const deleteRequestsIndexFileName = DeleteRequestsTableName + "".gz"" -+const ( -+ deleteRequestsIndexFileName = DeleteRequestsTableName + "".gz"" -+ deleteRequestsSQLiteFileName = DeleteRequestsTableName + "".sqlite.gz"" -+) - - func newDeleteRequestsTable(workingDirectory string, indexStorageClient storage.Client) (index.Client, error) { - dbPath := filepath.Join(workingDirectory, DeleteRequestsTableName, DeleteRequestsTableName) -diff --git a/pkg/compactor/deletion/noop_delete_requests_store.go b/pkg/compactor/deletion/noop_delete_requests_store.go -index 433578c02b5e3..2683cd6fae370 100644 ---- a/pkg/compactor/deletion/noop_delete_requests_store.go -+++ b/pkg/compactor/deletion/noop_delete_requests_store.go -@@ -2,6 +2,9 @@ package deletion - - import ( - ""context"" -+ ""time"" -+ -+ ""github.com/prometheus/common/model"" - ) - - func NewNoOpDeleteRequestsStore() DeleteRequestsStore { -@@ -10,19 +13,27 @@ func NewNoOpDeleteRequestsStore() DeleteRequestsStore { - - type noOpDeleteRequestsStore struct{} - --func (d *noOpDeleteRequestsStore) GetAllDeleteRequests(_ context.Context) ([]DeleteRequest, error) { -+func (d *noOpDeleteRequestsStore) GetDeleteRequest(_ context.Context, _, _ string) (DeleteRequest, error) { -+ return DeleteRequest{}, nil -+} -+ -+func (d *noOpDeleteRequestsStore) GetAllRequests(_ context.Context) ([]DeleteRequest, error) { -+ return nil, 
nil -+} -+ -+func (d *noOpDeleteRequestsStore) GetAllShards(_ context.Context) ([]DeleteRequest, error) { - return nil, nil - } - --func (d *noOpDeleteRequestsStore) MergeShardedRequests(_ context.Context, _ DeleteRequest, _ []DeleteRequest) error { -+func (d *noOpDeleteRequestsStore) MergeShardedRequests(_ context.Context) error { - return nil - } - --func (d *noOpDeleteRequestsStore) AddDeleteRequestGroup(_ context.Context, _ []DeleteRequest) ([]DeleteRequest, error) { -- return nil, nil -+func (d *noOpDeleteRequestsStore) AddDeleteRequest(_ context.Context, _, _ string, _, _ model.Time, _ time.Duration) (string, error) { -+ return """", nil - } - --func (d *noOpDeleteRequestsStore) GetDeleteRequestsByStatus(_ context.Context, _ DeleteRequestStatus) ([]DeleteRequest, error) { -+func (d *noOpDeleteRequestsStore) GetUnprocessedShards(_ context.Context) ([]DeleteRequest, error) { - return nil, nil - } - -@@ -30,7 +41,7 @@ func (d *noOpDeleteRequestsStore) GetAllDeleteRequestsForUser(_ context.Context, - return nil, nil - } - --func (d *noOpDeleteRequestsStore) UpdateStatus(_ context.Context, _ DeleteRequest, _ DeleteRequestStatus) error { -+func (d *noOpDeleteRequestsStore) MarkShardAsProcessed(_ context.Context, _ DeleteRequest) error { - return nil - } - -@@ -38,7 +49,7 @@ func (d *noOpDeleteRequestsStore) GetDeleteRequestGroup(_ context.Context, _, _ - return nil, nil - } - --func (d *noOpDeleteRequestsStore) RemoveDeleteRequests(_ context.Context, _ []DeleteRequest) error { -+func (d *noOpDeleteRequestsStore) RemoveDeleteRequest(_ context.Context, _ string, _ string) error { - return nil - } - -diff --git a/pkg/compactor/deletion/request_handler.go b/pkg/compactor/deletion/request_handler.go -index 448ab7a78dd8e..5d5d166f0406a 100644 ---- a/pkg/compactor/deletion/request_handler.go -+++ b/pkg/compactor/deletion/request_handler.go -@@ -27,14 +27,17 @@ type DeleteRequestHandler struct { - deleteRequestsStore DeleteRequestsStore - metrics *deleteRequestHandlerMetrics - maxInterval time.Duration -+ -+ deleteRequestCancelPeriod time.Duration - } - - // NewDeleteRequestHandler creates a DeleteRequestHandler --func NewDeleteRequestHandler(deleteStore DeleteRequestsStore, maxInterval time.Duration, registerer prometheus.Registerer) *DeleteRequestHandler { -+func NewDeleteRequestHandler(deleteStore DeleteRequestsStore, maxInterval, deleteRequestCancelPeriod time.Duration, registerer prometheus.Registerer) *DeleteRequestHandler { - deleteMgr := DeleteRequestHandler{ -- deleteRequestsStore: deleteStore, -- maxInterval: maxInterval, -- metrics: newDeleteRequestHandlerMetrics(registerer), -+ deleteRequestsStore: deleteStore, -+ maxInterval: maxInterval, -+ deleteRequestCancelPeriod: deleteRequestCancelPeriod, -+ metrics: newDeleteRequestHandlerMetrics(registerer), - } - - return &deleteMgr -@@ -79,23 +82,16 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r - } - } - -- deleteRequests := buildRequests(shardByInterval, query, userID, startTime, endTime) -- createdDeleteRequests, err := dm.deleteRequestsStore.AddDeleteRequestGroup(ctx, deleteRequests) -+ requestID, err := dm.deleteRequestsStore.AddDeleteRequest(ctx, userID, query, startTime, endTime, shardByInterval) - if err != nil { - level.Error(util_log.Logger).Log(""msg"", ""error adding delete request to the store"", ""err"", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - -- if len(createdDeleteRequests) == 0 { -- level.Error(util_log.Logger).Log(""msg"", ""zero delete requests 
created"", ""user"", userID, ""query"", query) -- http.Error(w, ""Zero delete requests were created due to an internal error. Please contact support."", http.StatusInternalServerError) -- return -- } -- - level.Info(util_log.Logger).Log( - ""msg"", ""delete request for user added"", -- ""delete_request_id"", createdDeleteRequests[0].RequestID, -+ ""delete_request_id"", requestID, - ""user"", userID, - ""query"", query, - ""interval"", shardByInterval.String(), -@@ -232,7 +228,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter - - params := r.URL.Query() - requestID := params.Get(""request_id"") -- deleteRequests, err := dm.deleteRequestsStore.GetDeleteRequestGroup(ctx, userID, requestID) -+ deleteRequest, err := dm.deleteRequestsStore.GetDeleteRequest(ctx, userID, requestID) - if err != nil { - if errors.Is(err, ErrDeleteRequestNotFound) { - http.Error(w, ""could not find delete request with given id"", http.StatusNotFound) -@@ -244,18 +240,17 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter - return - } - -- toDelete := filterProcessed(deleteRequests) -- if len(toDelete) == 0 { -+ if deleteRequest.Status == StatusProcessed { - http.Error(w, ""deletion of request which is in process or already processed is not allowed"", http.StatusBadRequest) - return - } - -- if len(toDelete) != len(deleteRequests) && params.Get(""force"") != ""true"" { -- http.Error(w, ""Unable to cancel partially completed delete request. To force, use the ?force query parameter"", http.StatusBadRequest) -+ if (deleteRequest.Status != StatusReceived || deleteRequest.CreatedAt.Add(dm.deleteRequestCancelPeriod).Before(model.Now())) && params.Get(""force"") != ""true"" { -+ http.Error(w, fmt.Sprintf(""Cancellation of partially completed delete request or delete request past the deadline of %s since its creation is not allowed. 
To force, use the ?force query parameter"", dm.deleteRequestCancelPeriod.String()), http.StatusBadRequest) - return - } - -- if err := dm.deleteRequestsStore.RemoveDeleteRequests(ctx, toDelete); err != nil { -+ if err := dm.deleteRequestsStore.RemoveDeleteRequest(ctx, userID, requestID); err != nil { - level.Error(util_log.Logger).Log(""msg"", ""error cancelling the delete request"", ""err"", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return -diff --git a/pkg/compactor/deletion/request_handler_test.go b/pkg/compactor/deletion/request_handler_test.go -index 805fde58a0cae..fa1e2596082ec 100644 ---- a/pkg/compactor/deletion/request_handler_test.go -+++ b/pkg/compactor/deletion/request_handler_test.go -@@ -21,7 +21,7 @@ import ( - func TestAddDeleteRequestHandler(t *testing.T) { - t.Run(""it adds the delete request to the store"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", `{foo=""bar""}`, ""0000000000"", ""0000000001"") - -@@ -30,27 +30,15 @@ func TestAddDeleteRequestHandler(t *testing.T) { - - require.Equal(t, w.Code, http.StatusNoContent) - -- require.Equal(t, ""org-id"", store.addReqs[0].UserID) -- require.Equal(t, `{foo=""bar""}`, store.addReqs[0].Query) -- require.Equal(t, toTime(""0000000000""), store.addReqs[0].StartTime) -- require.Equal(t, toTime(""0000000001""), store.addReqs[0].EndTime) -- }) -- -- t.Run(""an error is returned if adding delete request group returned zero"", func(t *testing.T) { -- store := &mockDeleteRequestsStore{returnZeroDeleteRequests: true} -- h := NewDeleteRequestHandler(store, 0, nil) -- -- req := buildRequest(""org-id"", `{foo=""bar""}`, ""0000000000"", ""0000000001"") -- -- w := httptest.NewRecorder() -- h.AddDeleteRequestHandler(w, req) -- -- require.Equal(t, w.Code, http.StatusInternalServerError) -+ require.Equal(t, ""org-id"", store.addReq.userID) -+ require.Equal(t, `{foo=""bar""}`, store.addReq.query) -+ require.Equal(t, toTime(""0000000000""), store.addReq.startTime) -+ require.Equal(t, toTime(""0000000001""), store.addReq.endTime) - }) - - t.Run(""it only shards deletes with line filter based on a query param"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - now := model.Now() - from := model.TimeFromUnix(now.Add(-3 * time.Hour).Unix()) -@@ -66,12 +54,12 @@ func TestAddDeleteRequestHandler(t *testing.T) { - h.AddDeleteRequestHandler(w, req) - - require.Equal(t, w.Code, http.StatusNoContent) -- verifyRequestSplits(t, from, to, maxInterval, store.addReqs) -+ require.Equal(t, maxInterval, store.addReq.shardByInterval) - }) - - t.Run(""it uses the default for sharding when the query param isn't present"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} -- h := NewDeleteRequestHandler(store, time.Hour, nil) -+ h := NewDeleteRequestHandler(store, time.Hour, 0, nil) - - now := model.Now() - from := model.TimeFromUnix(now.Add(-3 * time.Hour).Unix()) -@@ -83,12 +71,12 @@ func TestAddDeleteRequestHandler(t *testing.T) { - h.AddDeleteRequestHandler(w, req) - - require.Equal(t, w.Code, http.StatusNoContent) -- verifyRequestSplits(t, from, to, time.Hour, store.addReqs) -+ require.Equal(t, time.Hour, store.addReq.shardByInterval) - }) - - t.Run(""it does not shard deletes without line filter"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} -- h := 
NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - from := model.TimeFromUnix(model.Now().Add(-3 * time.Hour).Unix()) - to := model.TimeFromUnix(from.Add(3 * time.Hour).Unix()) -@@ -102,14 +90,12 @@ func TestAddDeleteRequestHandler(t *testing.T) { - h.AddDeleteRequestHandler(w, req) - - require.Equal(t, w.Code, http.StatusNoContent) -- require.Len(t, store.addReqs, 1) -- require.Equal(t, from, store.addReqs[0].StartTime) -- require.Equal(t, to, store.addReqs[0].EndTime) -+ require.Equal(t, time.Duration(0), store.addReq.shardByInterval) - }) - - t.Run(""it works with RFC3339"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", `{foo=""bar""}`, ""2006-01-02T15:04:05Z"", ""2006-01-03T15:04:05Z"") - -@@ -118,15 +104,15 @@ func TestAddDeleteRequestHandler(t *testing.T) { - - require.Equal(t, w.Code, http.StatusNoContent) - -- require.Equal(t, ""org-id"", store.addReqs[0].UserID) -- require.Equal(t, `{foo=""bar""}`, store.addReqs[0].Query) -- require.Equal(t, toTime(""1136214245""), store.addReqs[0].StartTime) -- require.Equal(t, toTime(""1136300645""), store.addReqs[0].EndTime) -+ require.Equal(t, ""org-id"", store.addReq.userID) -+ require.Equal(t, `{foo=""bar""}`, store.addReq.query) -+ require.Equal(t, toTime(""1136214245""), store.addReq.startTime) -+ require.Equal(t, toTime(""1136300645""), store.addReq.endTime) - }) - - t.Run(""it fills in end time if blank"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", `{foo=""bar""}`, ""0000000000"", """") - -@@ -135,15 +121,15 @@ func TestAddDeleteRequestHandler(t *testing.T) { - - require.Equal(t, w.Code, http.StatusNoContent) - -- require.Equal(t, ""org-id"", store.addReqs[0].UserID) -- require.Equal(t, `{foo=""bar""}`, store.addReqs[0].Query) -- require.Equal(t, toTime(""0000000000""), store.addReqs[0].StartTime) -- require.InDelta(t, int64(model.Now()), int64(store.addReqs[0].EndTime), 1000) -+ require.Equal(t, ""org-id"", store.addReq.userID) -+ require.Equal(t, `{foo=""bar""}`, store.addReq.query) -+ require.Equal(t, toTime(""0000000000""), store.addReq.startTime) -+ require.InDelta(t, int64(model.Now()), int64(store.addReq.endTime), 1000) - }) - - t.Run(""it returns 500 when the delete store errors"", func(t *testing.T) { - store := &mockDeleteRequestsStore{addErr: errors.New(""something bad"")} -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", `{foo=""bar""}`, ""0000000000"", ""0000000001"") - -@@ -153,7 +139,7 @@ func TestAddDeleteRequestHandler(t *testing.T) { - }) - - t.Run(""Validation"", func(t *testing.T) { -- h := NewDeleteRequestHandler(&mockDeleteRequestsStore{}, time.Minute, nil) -+ h := NewDeleteRequestHandler(&mockDeleteRequestsStore{}, time.Minute, 0, nil) - - for _, tc := range []struct { - orgID, query, startTime, endTime, interval, error string -@@ -192,13 +178,12 @@ func TestAddDeleteRequestHandler(t *testing.T) { - func TestCancelDeleteRequestHandler(t *testing.T) { - t.Run(""it removes unprocessed delete requests from the store when force is true"", func(t *testing.T) { - stored := []DeleteRequest{ -- {RequestID: ""test-request"", UserID: ""org-id"", Query: ""test-query"", SequenceNum: 0, Status: StatusProcessed}, -- 
{RequestID: ""test-request"", UserID: ""org-id"", Query: ""test-query"", SequenceNum: 1, Status: StatusReceived}, -+ {RequestID: ""test-request"", UserID: ""org-id"", Query: ""test-query"", SequenceNum: 1, Status: StatusReceived, CreatedAt: now.Add(-2 * time.Hour)}, - } - store := &mockDeleteRequestsStore{} - store.getResult = stored - -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, time.Hour, nil) - - req := buildRequest(""org-id"", ``, """", """") - params := req.URL.Query() -@@ -213,7 +198,10 @@ func TestCancelDeleteRequestHandler(t *testing.T) { - - require.Equal(t, store.getUser, ""org-id"") - require.Equal(t, store.getID, ""test-request"") -- require.Equal(t, stored[1], store.removeReqs[0]) -+ require.Equal(t, removeReqDetails{ -+ userID: stored[0].UserID, -+ reqID: stored[0].RequestID, -+ }, store.removeReqs) - }) - - t.Run(""it returns an error when parts of the query have started to be processed"", func(t *testing.T) { -@@ -225,7 +213,7 @@ func TestCancelDeleteRequestHandler(t *testing.T) { - store := &mockDeleteRequestsStore{} - store.getResult = stored - -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", ``, """", """") - params := req.URL.Query() -@@ -237,13 +225,13 @@ func TestCancelDeleteRequestHandler(t *testing.T) { - h.CancelDeleteRequestHandler(w, req) - - require.Equal(t, w.Code, http.StatusBadRequest) -- require.Equal(t, ""Unable to cancel partially completed delete request. To force, use the ?force query parameter\n"", w.Body.String()) -+ require.Equal(t, ""deletion of request which is in process or already processed is not allowed\n"", w.Body.String()) - }) - - t.Run(""error getting from store"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} - store.getErr = errors.New(""something bad"") -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""orgid"", ``, """", """") - params := req.URL.Query() -@@ -258,12 +246,12 @@ func TestCancelDeleteRequestHandler(t *testing.T) { - }) - - t.Run(""error removing from the store"", func(t *testing.T) { -- stored := []DeleteRequest{{RequestID: ""test-request"", UserID: ""org-id"", Query: ""test-query"", Status: StatusReceived}} -+ stored := []DeleteRequest{{RequestID: ""test-request"", UserID: ""org-id"", Query: ""test-query"", Status: StatusReceived, CreatedAt: now}} - store := &mockDeleteRequestsStore{} - store.getResult = stored - store.removeErr = errors.New(""something bad"") - -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, time.Hour, nil) - - req := buildRequest(""org-id"", ``, """", """") - params := req.URL.Query() -@@ -279,7 +267,7 @@ func TestCancelDeleteRequestHandler(t *testing.T) { - - t.Run(""Validation"", func(t *testing.T) { - t.Run(""no org id"", func(t *testing.T) { -- h := NewDeleteRequestHandler(&mockDeleteRequestsStore{}, 0, nil) -+ h := NewDeleteRequestHandler(&mockDeleteRequestsStore{}, 0, 0, nil) - - req := buildRequest("""", ``, """", """") - params := req.URL.Query() -@@ -294,7 +282,7 @@ func TestCancelDeleteRequestHandler(t *testing.T) { - }) - - t.Run(""request not found"", func(t *testing.T) { -- h := NewDeleteRequestHandler(&mockDeleteRequestsStore{getErr: ErrDeleteRequestNotFound}, 0, nil) -+ h := NewDeleteRequestHandler(&mockDeleteRequestsStore{getErr: ErrDeleteRequestNotFound}, 0, 0, nil) - - req := buildRequest(""org-id"", ``, """", """") - params := 
req.URL.Query() -@@ -313,7 +301,7 @@ func TestCancelDeleteRequestHandler(t *testing.T) { - store := &mockDeleteRequestsStore{} - store.getResult = stored - -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", ``, """", """") - params := req.URL.Query() -@@ -333,7 +321,7 @@ func TestGetAllDeleteRequestsHandler(t *testing.T) { - t.Run(""it gets all the delete requests for the user"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} - store.getAllResult = []DeleteRequest{{RequestID: ""test-request-1"", Status: StatusReceived}, {RequestID: ""test-request-2"", Status: StatusReceived}} -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", ``, """", """") - -@@ -356,7 +344,7 @@ func TestGetAllDeleteRequestsHandler(t *testing.T) { - {RequestID: ""test-request-2"", CreatedAt: now.Add(time.Minute), StartTime: now.Add(30 * time.Minute), EndTime: now.Add(90 * time.Minute)}, - {RequestID: ""test-request-1"", CreatedAt: now, StartTime: now.Add(time.Hour), EndTime: now.Add(2 * time.Hour)}, - } -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", ``, """", """") - -@@ -386,7 +374,7 @@ func TestGetAllDeleteRequestsHandler(t *testing.T) { - {RequestID: ""test-request-2"", CreatedAt: now.Add(time.Minute), Status: StatusProcessed}, - {RequestID: ""test-request-3"", CreatedAt: now.Add(2 * time.Minute), Status: StatusReceived}, - } -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""org-id"", ``, """", """") - -@@ -409,7 +397,7 @@ func TestGetAllDeleteRequestsHandler(t *testing.T) { - t.Run(""error getting from store"", func(t *testing.T) { - store := &mockDeleteRequestsStore{} - store.getAllErr = errors.New(""something bad"") -- h := NewDeleteRequestHandler(store, 0, nil) -+ h := NewDeleteRequestHandler(store, 0, 0, nil) - - req := buildRequest(""orgid"", ``, """", """") - params := req.URL.Query() -@@ -425,7 +413,7 @@ func TestGetAllDeleteRequestsHandler(t *testing.T) { - - t.Run(""validation"", func(t *testing.T) { - t.Run(""no org id"", func(t *testing.T) { -- h := NewDeleteRequestHandler(&mockDeleteRequestsStore{}, 0, nil) -+ h := NewDeleteRequestHandler(&mockDeleteRequestsStore{}, 0, 0, nil) - - req := buildRequest("""", ``, """", """")",chore,refactor delete requests store interface to prepare for replacing boltdb with sqlite for storing delete requests (#16181) -21b901db9ba99960fef4c288eb5dceec7b56da42,2021-04-03 17:04:52,Ed Welch,Loki: Distributor log message bodySize should always reflect the compressed size (#3572),False,"diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go -index 8928ca066bad6..9248ffe5fa70e 100644 ---- a/pkg/distributor/http.go -+++ b/pkg/distributor/http.go -@@ -3,6 +3,7 @@ package distributor - import ( - ""compress/gzip"" - ""fmt"" -+ ""io"" - ""math"" - ""net/http"" - ""strings"" -@@ -107,24 +108,26 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { - - func ParseRequest(logger gokit.Logger, userID string, r *http.Request) (*logproto.PushRequest, error) { - -- var body lokiutil.SizeReader -- -+ // Body -+ var body io.Reader -+ // bodySize should always reflect the compressed size of the request body -+ bodySize := lokiutil.NewSizeReader(r.Body) - contentEncoding := r.Header.Get(contentEnc) - switch contentEncoding { - case 
"""": -- body = lokiutil.NewSizeReader(r.Body) -+ body = bodySize - case ""snappy"": - // Snappy-decoding is done by `util.ParseProtoReader(..., util.RawSnappy)` below. - // Pass on body bytes. Note: HTTP clients do not need to set this header, - // but they sometimes do. See #3407. -- body = lokiutil.NewSizeReader(r.Body) -+ body = bodySize - case ""gzip"": -- gzipReader, err := gzip.NewReader(r.Body) -+ gzipReader, err := gzip.NewReader(bodySize) - if err != nil { - return nil, err - } - defer gzipReader.Close() -- body = lokiutil.NewSizeReader(gzipReader) -+ body = gzipReader - default: - return nil, fmt.Errorf(""Content-Encoding %q not supported"", contentEncoding) - } -@@ -163,7 +166,7 @@ func ParseRequest(logger gokit.Logger, userID string, r *http.Request) (*logprot - ""path"", r.URL.Path, - ""contentType"", contentType, - ""contentEncoding"", contentEncoding, -- ""bodySize"", humanize.Bytes(uint64(body.Size())), -+ ""bodySize"", humanize.Bytes(uint64(bodySize.Size())), - ""streams"", len(req.Streams), - ""entries"", totalEntries, - ""streamLabelsSize"", humanize.Bytes(uint64(streamLabelsSize)),",Loki,Distributor log message bodySize should always reflect the compressed size (#3572) -096cf0cfaf3fe62cb1bcc86945229e64dfa326ed,2021-10-15 20:10:09,Callum Styan,"Update golang and loki-build-image image versions. (#4481) - -Signed-off-by: Callum Styan ",False,"diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile -index 415350271b107..eef3604b8decb 100644 ---- a/clients/cmd/docker-driver/Dockerfile -+++ b/clients/cmd/docker-driver/Dockerfile -@@ -1,4 +1,4 @@ --ARG BUILD_IMAGE=grafana/loki-build-image:0.12.0 -+ARG BUILD_IMAGE=grafana/loki-build-image:0.18.0 - # Directories in this file are referenced from the root of the project not this folder - # This file is intended to be called from the root like so: - # docker build -t grafana/loki -f cmd/loki/Dockerfile . -diff --git a/clients/cmd/fluent-bit/Dockerfile b/clients/cmd/fluent-bit/Dockerfile -index 4974c9b718df5..db2ef30c4fe33 100644 ---- a/clients/cmd/fluent-bit/Dockerfile -+++ b/clients/cmd/fluent-bit/Dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16.2 as build -+FROM golang:1.17.2 as build - COPY . /src/loki - WORKDIR /src/loki - RUN make clean && make BUILD_IN_CONTAINER=false fluent-bit-plugin -diff --git a/clients/cmd/promtail/Dockerfile b/clients/cmd/promtail/Dockerfile -index 89628775a7342..c2420beae912d 100644 ---- a/clients/cmd/promtail/Dockerfile -+++ b/clients/cmd/promtail/Dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16.2-buster as build -+FROM golang:1.17.2-buster as build - # TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. - # This is helpful when file system timestamps can't be trusted with make - ARG TOUCH_PROTOS -diff --git a/clients/cmd/promtail/Dockerfile.arm32 b/clients/cmd/promtail/Dockerfile.arm32 -index 8aebbaa0df93e..0a5e8c7590907 100644 ---- a/clients/cmd/promtail/Dockerfile.arm32 -+++ b/clients/cmd/promtail/Dockerfile.arm32 -@@ -1,4 +1,4 @@ --FROM golang:1.16.2 as build -+FROM golang:1.17.2 as build - # TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. 
- # This is helpful when file system timestamps can't be trusted with make - ARG TOUCH_PROTOS -diff --git a/clients/cmd/promtail/Dockerfile.cross b/clients/cmd/promtail/Dockerfile.cross -index a15e5f35bcd1d..e267ce94cf73a 100644 ---- a/clients/cmd/promtail/Dockerfile.cross -+++ b/clients/cmd/promtail/Dockerfile.cross -@@ -1,8 +1,8 @@ --ARG BUILD_IMAGE=grafana/loki-build-image:0.12.0 -+ARG BUILD_IMAGE=grafana/loki-build-image:0.18.0 - # Directories in this file are referenced from the root of the project not this folder - # This file is intended to be called from the root like so: - # docker build -t grafana/promtail -f clients/cmd/promtail/Dockerfile . --FROM golang:1.16.2-alpine as goenv -+FROM golang:1.17.2-alpine as goenv - RUN go env GOARCH > /goarch && \ - go env GOARM > /goarm - -diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile -index 1c94f5f4e0e08..d804f41128099 100644 ---- a/cmd/logcli/Dockerfile -+++ b/cmd/logcli/Dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16.2 as build -+FROM golang:1.17.2 as build - - ARG TOUCH_PROTOS - COPY . /src/loki -diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile -index 05b37e09f4d87..e5aa0b6a8c805 100644 ---- a/cmd/loki-canary/Dockerfile -+++ b/cmd/loki-canary/Dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16.2 as build -+FROM golang:1.17.2 as build - # TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. - # This is helpful when file system timestamps can't be trusted with make - ARG TOUCH_PROTOS -diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross -index 2da868348e500..a0164262d3541 100644 ---- a/cmd/loki-canary/Dockerfile.cross -+++ b/cmd/loki-canary/Dockerfile.cross -@@ -1,8 +1,8 @@ --ARG BUILD_IMAGE=grafana/loki-build-image:0.12.0 -+ARG BUILD_IMAGE=grafana/loki-build-image:0.18.0 - # Directories in this file are referenced from the root of the project not this folder - # This file is intended to be called from the root like so: - # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . --FROM golang:1.16.2-alpine as goenv -+FROM golang:1.17.2-alpine as goenv - RUN go env GOARCH > /goarch && \ - go env GOARM > /goarm - -diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile -index 2de844681a766..81b69d4ef4c82 100644 ---- a/cmd/loki/Dockerfile -+++ b/cmd/loki/Dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16.2 as build -+FROM golang:1.17.2 as build - # TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them. - # This is helpful when file system timestamps can't be trusted with make - ARG TOUCH_PROTOS -diff --git a/cmd/loki/Dockerfile.cross b/cmd/loki/Dockerfile.cross -index 24a2627e4a804..013d3e3fa5ccc 100644 ---- a/cmd/loki/Dockerfile.cross -+++ b/cmd/loki/Dockerfile.cross -@@ -1,8 +1,8 @@ --ARG BUILD_IMAGE=grafana/loki-build-image:0.12.0 -+ARG BUILD_IMAGE=grafana/loki-build-image:0.18.0 - # Directories in this file are referenced from the root of the project not this folder - # This file is intended to be called from the root like so: - # docker build -t grafana/loki -f cmd/loki/Dockerfile . --FROM golang:1.16.2-alpine as goenv -+FROM golang:1.17.2-alpine as goenv - RUN go env GOARCH > /goarch && \ - go env GOARM > /goarm - -diff --git a/cmd/migrate/Dockerfile b/cmd/migrate/Dockerfile -index 0f8c2c0af060f..581ac18ecd1b6 100644 ---- a/cmd/migrate/Dockerfile -+++ b/cmd/migrate/Dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16.2 as build -+FROM golang:1.17.2 as build - COPY . 
/src/loki - WORKDIR /src/loki - RUN make clean && make BUILD_IN_CONTAINER=false migrate -diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile -index ed4eb77e372ad..ec873807529aa 100644 ---- a/cmd/querytee/Dockerfile -+++ b/cmd/querytee/Dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16.2 as build -+FROM golang:1.17.2 as build - - ARG TOUCH_PROTOS - COPY . /src/loki -diff --git a/cmd/querytee/Dockerfile.cross b/cmd/querytee/Dockerfile.cross -index 6aee7e8711a6e..bdc41cb6e27bb 100644 ---- a/cmd/querytee/Dockerfile.cross -+++ b/cmd/querytee/Dockerfile.cross -@@ -1,8 +1,8 @@ --ARG BUILD_IMAGE=grafana/loki-build-image:0.9.1 -+ARG BUILD_IMAGE=grafana/loki-build-image:0.18.0 - # Directories in this file are referenced from the root of the project not this folder - # This file is intended to be called from the root like so: - # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . --FROM golang:1.16.2-alpine as goenv -+FROM golang:1.17.2-alpine as goenv - RUN go env GOARCH > /goarch && \ - go env GOARM > /goarm - -diff --git a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile -index fe6293ff53295..a804bc26f4d74 100644 ---- a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile -+++ b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile -@@ -1,4 +1,4 @@ --FROM golang:1.16 -+FROM golang:1.17 - ENV CGO_ENABLED=0 - RUN go get github.com/go-delve/delve/cmd/dlv",unknown,"Update golang and loki-build-image image versions. (#4481) - -Signed-off-by: Callum Styan " -7983f94b15b422b94517641bd9cec5c9da6903e1,2024-10-03 20:15:50,Trevor Whitney,feat: detected field values (#14350),False,"diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go -index 32539e3c83355..976395cd4f420 100644 ---- a/cmd/logcli/main.go -+++ b/cmd/logcli/main.go -@@ -692,7 +692,7 @@ func newVolumeQuery(rangeQuery bool, cmd *kingpin.CmdClause) *volume.Query { - - func newDetectedFieldsQuery(cmd *kingpin.CmdClause) *detected.FieldsQuery { - // calculate query range from cli params -- var from, to string -+ var fieldName, from, to string - var since time.Duration - - q := &detected.FieldsQuery{} -@@ -705,24 +705,28 @@ func newDetectedFieldsQuery(cmd *kingpin.CmdClause) *detected.FieldsQuery { - q.Start = mustParse(from, defaultStart) - q.End = mustParse(to, defaultEnd) - -+ q.FieldName = fieldName -+ - q.Quiet = *quiet - - return nil - }) - -- cmd.Flag(""field-limit"", ""Limit on number of fields to return.""). -+ cmd.Flag(""limit"", ""Limit on number of fields or values to return.""). - Default(""100""). -- IntVar(&q.FieldLimit) -+ IntVar(&q.Limit) - cmd.Flag(""line-limit"", ""Limit the number of lines each subquery is allowed to process.""). - Default(""1000""). - IntVar(&q.LineLimit) - cmd.Arg(""query"", ""eg '{foo=\""bar\"",baz=~\"".*blip\""} |~ \"".*error.*\""'""). - Required(). - StringVar(&q.QueryString) -+ cmd.Arg(""field"", ""The name of the field."").Default("""").StringVar(&fieldName) - cmd.Flag(""since"", ""Lookback window."").Default(""1h"").DurationVar(&since) - cmd.Flag(""from"", ""Start looking for logs at this absolute time (inclusive)"").StringVar(&from) - cmd.Flag(""to"", ""Stop looking for logs at this absolute time (exclusive)"").StringVar(&to) - cmd.Flag(""step"", ""Query resolution step width, for metric queries. Evaluate the query at the specified step over the time range.""). -+ Default(""10s""). 
- DurationVar(&q.Step) - - return q -diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md -index b74a559b12e7f..c8d5f7bbbd107 100644 ---- a/docs/sources/setup/install/helm/reference.md -+++ b/docs/sources/setup/install/helm/reference.md -@@ -5640,6 +5640,7 @@ null - ""/loki/api/v1/index/volume"", - ""/loki/api/v1/index/volume_range"", - ""/loki/api/v1/format_query"", -+ ""/loki/api/v1/detected_field"", - ""/loki/api/v1/detected_fields"", - ""/loki/api/v1/detected_labels"", - ""/loki/api/v1/patterns"" -@@ -5702,6 +5703,7 @@ null - ""/loki/api/v1/index/volume"", - ""/loki/api/v1/index/volume_range"", - ""/loki/api/v1/format_query"", -+ ""/loki/api/v1/detected_field"", - ""/loki/api/v1/detected_fields"", - ""/loki/api/v1/detected_labels"", - ""/loki/api/v1/patterns"" -diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go -index 583aa6494e77c..d00a95f38e5b5 100644 ---- a/pkg/ingester-rf1/ingester.go -+++ b/pkg/ingester-rf1/ingester.go -@@ -881,6 +881,6 @@ func (i *Ingester) GetDetectedFields(_ context.Context, r *logproto.DetectedFiel - Cardinality: 1, - }, - }, -- FieldLimit: r.GetFieldLimit(), -+ Limit: r.GetLimit(), - }, nil - } -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index 8672d5f4cad76..1f3a39415fa3f 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -1595,7 +1595,7 @@ func (i *Ingester) GetDetectedFields(_ context.Context, r *logproto.DetectedFiel - Cardinality: 1, - }, - }, -- FieldLimit: r.GetFieldLimit(), -+ Limit: r.GetLimit(), - }, nil - } - -diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go -index 531ef15f61ad5..f2d42b353f969 100644 ---- a/pkg/logcli/client/client.go -+++ b/pkg/logcli/client/client.go -@@ -28,17 +28,18 @@ import ( - ) - - const ( -- queryPath = ""/loki/api/v1/query"" -- queryRangePath = ""/loki/api/v1/query_range"" -- labelsPath = ""/loki/api/v1/labels"" -- labelValuesPath = ""/loki/api/v1/label/%s/values"" -- seriesPath = ""/loki/api/v1/series"" -- tailPath = ""/loki/api/v1/tail"" -- statsPath = ""/loki/api/v1/index/stats"" -- volumePath = ""/loki/api/v1/index/volume"" -- volumeRangePath = ""/loki/api/v1/index/volume_range"" -- detectedFieldsPath = ""/loki/api/v1/detected_fields"" -- defaultAuthHeader = ""Authorization"" -+ queryPath = ""/loki/api/v1/query"" -+ queryRangePath = ""/loki/api/v1/query_range"" -+ labelsPath = ""/loki/api/v1/labels"" -+ labelValuesPath = ""/loki/api/v1/label/%s/values"" -+ seriesPath = ""/loki/api/v1/series"" -+ tailPath = ""/loki/api/v1/tail"" -+ statsPath = ""/loki/api/v1/index/stats"" -+ volumePath = ""/loki/api/v1/index/volume"" -+ volumeRangePath = ""/loki/api/v1/index/volume_range"" -+ detectedFieldsPath = ""/loki/api/v1/detected_fields"" -+ detectedFieldValuesPath = ""/loki/api/v1/detected_field/%s/values"" -+ defaultAuthHeader = ""Authorization"" - - // HTTP header keys - HTTPScopeOrgID = ""X-Scope-OrgID"" -@@ -61,7 +62,7 @@ type Client interface { - GetStats(queryStr string, start, end time.Time, quiet bool) (*logproto.IndexStatsResponse, error) - GetVolume(query *volume.Query) (*loghttp.QueryResponse, error) - GetVolumeRange(query *volume.Query) (*loghttp.QueryResponse, error) -- GetDetectedFields(queryStr string, fieldLimit, lineLimit int, start, end time.Time, step time.Duration, quiet bool) (*loghttp.DetectedFieldsResponse, error) -+ GetDetectedFields(queryStr, fieldName string, fieldLimit, lineLimit int, start, end time.Time, step time.Duration, quiet bool) 
(*loghttp.DetectedFieldsResponse, error) - } - - // Tripperware can wrap a roundtripper. -@@ -234,15 +235,16 @@ func (c *DefaultClient) getVolume(path string, query *volume.Query) (*loghttp.Qu - } - - func (c *DefaultClient) GetDetectedFields( -- queryStr string, -- fieldLimit, lineLimit int, -+ queryStr, fieldName string, -+ limit, lineLimit int, - start, end time.Time, - step time.Duration, - quiet bool, - ) (*loghttp.DetectedFieldsResponse, error) { -+ - qsb := util.NewQueryStringBuilder() - qsb.SetString(""query"", queryStr) -- qsb.SetInt(""field_limit"", int64(fieldLimit)) -+ qsb.SetInt(""limit"", int64(limit)) - qsb.SetInt(""line_limit"", int64(lineLimit)) - qsb.SetInt(""start"", start.UnixNano()) - qsb.SetInt(""end"", end.UnixNano()) -@@ -251,7 +253,12 @@ func (c *DefaultClient) GetDetectedFields( - var err error - var r loghttp.DetectedFieldsResponse - -- if err = c.doRequest(detectedFieldsPath, qsb.Encode(), quiet, &r); err != nil { -+ path := detectedFieldsPath -+ if fieldName != """" { -+ path = fmt.Sprintf(detectedFieldValuesPath, url.PathEscape(fieldName)) -+ } -+ -+ if err = c.doRequest(path, qsb.Encode(), quiet, &r); err != nil { - return nil, err - } - -diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go -index 1267681d75c85..b1b97fd57b08a 100644 ---- a/pkg/logcli/client/file.go -+++ b/pkg/logcli/client/file.go -@@ -207,7 +207,7 @@ func (f *FileClient) GetVolumeRange(_ *volume.Query) (*loghttp.QueryResponse, er - } - - func (f *FileClient) GetDetectedFields( -- _ string, -+ _, _ string, - _, _ int, - _, _ time.Time, - _ time.Duration, -diff --git a/pkg/logcli/detected/fields.go b/pkg/logcli/detected/fields.go -index f8ba585ea2a00..cca74f11497b1 100644 ---- a/pkg/logcli/detected/fields.go -+++ b/pkg/logcli/detected/fields.go -@@ -18,10 +18,11 @@ type FieldsQuery struct { - QueryString string - Start time.Time - End time.Time -- FieldLimit int -+ Limit int - LineLimit int - Step time.Duration - Quiet bool -+ FieldName string - ColoredOutput bool - } - -@@ -30,7 +31,16 @@ func (q *FieldsQuery) Do(c client.Client, outputMode string) { - var resp *loghttp.DetectedFieldsResponse - var err error - -- resp, err = c.GetDetectedFields(q.QueryString, q.FieldLimit, q.LineLimit, q.Start, q.End, q.Step, q.Quiet) -+ resp, err = c.GetDetectedFields( -+ q.QueryString, -+ q.FieldName, -+ q.Limit, -+ q.LineLimit, -+ q.Start, -+ q.End, -+ q.Step, -+ q.Quiet, -+ ) - if err != nil { - log.Fatalf(""Error doing request: %+v"", err) - } -@@ -43,12 +53,17 @@ func (q *FieldsQuery) Do(c client.Client, outputMode string) { - } - fmt.Println(string(out)) - default: -- output := make([]string, len(resp.Fields)) -- for i, field := range resp.Fields { -- bold := color.New(color.Bold) -- output[i] = fmt.Sprintf(""label: %s\t\t"", bold.Sprintf(""%s"", field.Label)) + -- fmt.Sprintf(""type: %s\t\t"", bold.Sprintf(""%s"", field.Type)) + -- fmt.Sprintf(""cardinality: %s"", bold.Sprintf(""%d"", field.Cardinality)) -+ var output []string -+ if len(resp.Fields) > 0 { -+ output = make([]string, len(resp.Fields)) -+ for i, field := range resp.Fields { -+ bold := color.New(color.Bold) -+ output[i] = fmt.Sprintf(""label: %s\t\t"", bold.Sprintf(""%s"", field.Label)) + -+ fmt.Sprintf(""type: %s\t\t"", bold.Sprintf(""%s"", field.Type)) + -+ fmt.Sprintf(""cardinality: %s"", bold.Sprintf(""%d"", field.Cardinality)) -+ } -+ } else if len(resp.Values) > 0 { -+ output = resp.Values - } - - slices.Sort(output) -diff --git a/pkg/logcli/query/query_test.go b/pkg/logcli/query/query_test.go -index 
8e52134482b7c..35077968a1176 100644 ---- a/pkg/logcli/query/query_test.go -+++ b/pkg/logcli/query/query_test.go -@@ -486,7 +486,7 @@ func (t *testQueryClient) GetVolumeRange(_ *volume.Query) (*loghttp.QueryRespons - } - - func (t *testQueryClient) GetDetectedFields( -- _ string, -+ _, _ string, - _, _ int, - _, _ time.Time, - _ time.Duration, -diff --git a/pkg/loghttp/detected.go b/pkg/loghttp/detected.go -index 632ac7cd02410..26c263b8c638c 100644 ---- a/pkg/loghttp/detected.go -+++ b/pkg/loghttp/detected.go -@@ -5,6 +5,7 @@ import ""github.com/grafana/loki/v3/pkg/logproto"" - // LabelResponse represents the http json response to a label query - type DetectedFieldsResponse struct { - Fields []DetectedField `json:""fields,omitempty""` -+ Values []string `json:""values,omitempty""` - } - - type DetectedField struct { -diff --git a/pkg/loghttp/params.go b/pkg/loghttp/params.go -index 4f34992df592b..c32161d5bf5ec 100644 ---- a/pkg/loghttp/params.go -+++ b/pkg/loghttp/params.go -@@ -19,7 +19,7 @@ import ( - - const ( - defaultQueryLimit = 100 -- defaultFieldLimit = 1000 -+ defaultLimit = 1000 - defaultSince = 1 * time.Hour - defaultDirection = logproto.BACKWARD - ) -@@ -46,11 +46,18 @@ func lineLimit(r *http.Request) (uint32, error) { - return uint32(l), nil - } - --func fieldLimit(r *http.Request) (uint32, error) { -- l, err := parseInt(r.Form.Get(""field_limit""), defaultFieldLimit) -+func detectedFieldsLimit(r *http.Request) (uint32, error) { -+ limit := r.Form.Get(""limit"") -+ if limit == """" { -+ // for backwards compatability -+ limit = r.Form.Get(""field_limit"") -+ } -+ -+ l, err := parseInt(limit, defaultLimit) - if err != nil { - return 0, err - } -+ - if l <= 0 { - return 0, errors.New(""limit must be a positive value"") - } -diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go -index af67b9df2d0a3..a2bce462aab80 100644 ---- a/pkg/loghttp/query.go -+++ b/pkg/loghttp/query.go -@@ -9,6 +9,7 @@ import ( - ""unsafe"" - - ""github.com/c2h5oh/datasize"" -+ ""github.com/gorilla/mux"" - ""github.com/grafana/jsonparser"" - json ""github.com/json-iterator/go"" - ""github.com/prometheus/common/model"" -@@ -650,6 +651,7 @@ func ParseDetectedFieldsQuery(r *http.Request) (*logproto.DetectedFieldsRequest, - result := &logproto.DetectedFieldsRequest{} - - result.Query = query(r) -+ result.Values, result.Name = values(r) - result.Start, result.End, err = bounds(r) - if err != nil { - return nil, err -@@ -664,7 +666,7 @@ func ParseDetectedFieldsQuery(r *http.Request) (*logproto.DetectedFieldsRequest, - return nil, err - } - -- result.FieldLimit, err = fieldLimit(r) -+ result.Limit, err = detectedFieldsLimit(r) - if err != nil { - return nil, err - } -@@ -684,9 +686,15 @@ func ParseDetectedFieldsQuery(r *http.Request) (*logproto.DetectedFieldsRequest, - if (result.End.Sub(result.Start) / step) > 11000 { - return nil, errStepTooSmall - } -+ - return result, nil - } - -+func values(r *http.Request) (bool, string) { -+ name, ok := mux.Vars(r)[""name""] -+ return ok, name -+} -+ - func targetLabels(r *http.Request) []string { - lbls := strings.Split(r.Form.Get(""targetLabels""), "","") - if (len(lbls) == 1 && lbls[0] == """") || len(lbls) == 0 { -diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go -index ca45a2add0aa9..405b29cc6148d 100644 ---- a/pkg/logproto/compat.go -+++ b/pkg/logproto/compat.go -@@ -524,7 +524,7 @@ func (m *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) { - otlog.String(""start"", m.Start.String()), - otlog.String(""end"", m.End.String()), - otlog.String(""step"", 
time.Duration(m.Step).String()), -- otlog.String(""field_limit"", fmt.Sprintf(""%d"", m.FieldLimit)), -+ otlog.String(""field_limit"", fmt.Sprintf(""%d"", m.Limit)), - otlog.String(""line_limit"", fmt.Sprintf(""%d"", m.LineLimit)), - } - sp.LogFields(fields...) -diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go -index e5c36bb22b8d7..27b36e7bff093 100644 ---- a/pkg/logproto/logproto.pb.go -+++ b/pkg/logproto/logproto.pb.go -@@ -2696,12 +2696,14 @@ func (m *Volume) GetVolume() uint64 { - } - - type DetectedFieldsRequest struct { -- Start time.Time `protobuf:""bytes,1,opt,name=start,proto3,stdtime"" json:""start""` -- End time.Time `protobuf:""bytes,2,opt,name=end,proto3,stdtime"" json:""end""` -- Query string `protobuf:""bytes,3,opt,name=query,proto3"" json:""query,omitempty""` -- LineLimit uint32 `protobuf:""varint,4,opt,name=lineLimit,proto3"" json:""lineLimit,omitempty""` -- FieldLimit uint32 `protobuf:""varint,5,opt,name=fieldLimit,proto3"" json:""fieldLimit,omitempty""` -- Step int64 `protobuf:""varint,6,opt,name=step,proto3"" json:""step,omitempty""` -+ Start time.Time `protobuf:""bytes,1,opt,name=start,proto3,stdtime"" json:""start""` -+ End time.Time `protobuf:""bytes,2,opt,name=end,proto3,stdtime"" json:""end""` -+ Query string `protobuf:""bytes,3,opt,name=query,proto3"" json:""query,omitempty""` -+ LineLimit uint32 `protobuf:""varint,4,opt,name=lineLimit,proto3"" json:""lineLimit,omitempty""` -+ Limit uint32 `protobuf:""varint,5,opt,name=limit,proto3"" json:""limit,omitempty""` -+ Step int64 `protobuf:""varint,6,opt,name=step,proto3"" json:""step,omitempty""` -+ Values bool `protobuf:""varint,7,opt,name=values,proto3"" json:""values,omitempty""` -+ Name string `protobuf:""bytes,8,opt,name=name,proto3"" json:""name,omitempty""` - } - - func (m *DetectedFieldsRequest) Reset() { *m = DetectedFieldsRequest{} } -@@ -2764,9 +2766,9 @@ func (m *DetectedFieldsRequest) GetLineLimit() uint32 { - return 0 - } - --func (m *DetectedFieldsRequest) GetFieldLimit() uint32 { -+func (m *DetectedFieldsRequest) GetLimit() uint32 { - if m != nil { -- return m.FieldLimit -+ return m.Limit - } - return 0 - } -@@ -2778,9 +2780,24 @@ func (m *DetectedFieldsRequest) GetStep() int64 { - return 0 - } - -+func (m *DetectedFieldsRequest) GetValues() bool { -+ if m != nil { -+ return m.Values -+ } -+ return false -+} -+ -+func (m *DetectedFieldsRequest) GetName() string { -+ if m != nil { -+ return m.Name -+ } -+ return """" -+} -+ - type DetectedFieldsResponse struct { -- Fields []*DetectedField `protobuf:""bytes,1,rep,name=fields,proto3"" json:""fields,omitempty""` -- FieldLimit uint32 `protobuf:""varint,2,opt,name=fieldLimit,proto3"" json:""fieldLimit,omitempty""` -+ Fields []*DetectedField `protobuf:""bytes,1,rep,name=fields,proto3"" json:""fields,omitempty""` -+ Limit uint32 `protobuf:""varint,2,opt,name=limit,proto3"" json:""limit,omitempty""` -+ Values []string `protobuf:""bytes,3,rep,name=values,proto3"" json:""values,omitempty""` - } - - func (m *DetectedFieldsResponse) Reset() { *m = DetectedFieldsResponse{} } -@@ -2822,13 +2839,20 @@ func (m *DetectedFieldsResponse) GetFields() []*DetectedField { - return nil - } - --func (m *DetectedFieldsResponse) GetFieldLimit() uint32 { -+func (m *DetectedFieldsResponse) GetLimit() uint32 { - if m != nil { -- return m.FieldLimit -+ return m.Limit - } - return 0 - } - -+func (m *DetectedFieldsResponse) GetValues() []string { -+ if m != nil { -+ return m.Values -+ } -+ return nil -+} -+ - // TODO: make the detected field include the serialized 
sketch - // we only want cardinality in the JSON response - type DetectedField struct { -@@ -3130,179 +3154,180 @@ func init() { - func init() { proto.RegisterFile(""pkg/logproto/logproto.proto"", fileDescriptor_c28a5f14f1f4c79a) } - - var fileDescriptor_c28a5f14f1f4c79a = []byte{ -- // 2739 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3a, 0x4d, 0x6c, 0x1b, 0xc7, -- 0xd5, 0x5c, 0x72, 0x49, 0x91, 0x8f, 0x94, 0x2c, 0x8f, 0x68, 0x9b, 0x90, 0x1d, 0xae, 0x32, 0xf8, -- 0xbe, 0xc4, 0x5f, 0xec, 0x88, 0xb6, 0xf3, 0x25, 0x75, 0x9c, 0xa6, 0xa9, 0x29, 0xc5, 0x8e, 0x1d, -- 0xc5, 0x76, 0x46, 0x8e, 0x93, 0x16, 0x0d, 0x82, 0x35, 0x39, 0x22, 0x17, 0x26, 0x77, 0xe9, 0xdd, -- 0x61, 0x1c, 0xde, 0x0a, 0xf4, 0x5c, 0x34, 0x40, 0x0f, 0x6d, 0x2f, 0x05, 0x0a, 0x14, 0x68, 0x51, -- 0x20, 0x97, 0xa2, 0xc7, 0xa2, 0xbd, 0x14, 0x68, 0x7a, 0xcb, 0x31, 0xc8, 0x81, 0x6d, 0x94, 0x4b, -- 0x21, 0xa0, 0x40, 0x80, 0x02, 0x2d, 0x90, 0x53, 0x31, 0x7f, 0xbb, 0xb3, 0x2b, 0xaa, 0x0e, 0x5d, -- 0x17, 0x49, 0x2e, 0xe4, 0xcc, 0x9b, 0x37, 0x6f, 0xe6, 0xfd, 0xcc, 0xfb, 0x23, 0xe1, 0xf8, 0xe8, -- 0x4e, 0xaf, 0x35, 0x08, 0x7a, 0xa3, 0x30, 0x60, 0x41, 0x3c, 0x58, 0x17, 0x9f, 0xa8, 0xac, 0xe7, -- 0xab, 0xf5, 0x5e, 0xd0, 0x0b, 0x24, 0x0e, 0x1f, 0xc9, 0xf5, 0x55, 0xa7, 0x17, 0x04, 0xbd, 0x01, -- 0x6d, 0x89, 0xd9, 0xed, 0xf1, 0x4e, 0x8b, 0x79, 0x43, 0x1a, 0x31, 0x77, 0x38, 0x52, 0x08, 0x6b, -- 0x8a, 0xfa, 0xdd, 0xc1, 0x30, 0xe8, 0xd2, 0x41, 0x2b, 0x62, 0x2e, 0x8b, 0xe4, 0xa7, 0xc2, 0x58, -- 0xe1, 0x18, 0xa3, 0x71, 0xd4, 0x17, 0x1f, 0x0a, 0x78, 0x86, 0x03, 0x23, 0x16, 0x84, 0x6e, 0x8f, -- 0xb6, 0x3a, 0xfd, 0xb1, 0x7f, 0xa7, 0xd5, 0x71, 0x3b, 0x7d, 0xda, 0x0a, 0x69, 0x34, 0x1e, 0xb0, -- 0x48, 0x4e, 0xd8, 0x64, 0x44, 0x15, 0x19, 0xfc, 0x1b, 0x0b, 0x8e, 0x6c, 0xb9, 0xb7, 0xe9, 0xe0, -- 0x66, 0x70, 0xcb, 0x1d, 0x8c, 0x69, 0x44, 0x68, 0x34, 0x0a, 0xfc, 0x88, 0xa2, 0x0d, 0x28, 0x0d, -- 0xf8, 0x42, 0xd4, 0xb0, 0xd6, 0x0a, 0x27, 0xab, 0xe7, 0x4e, 0xad, 0xc7, 0x4c, 0xce, 0xdc, 0x20, -- 0xa1, 0xd1, 0x8b, 0x3e, 0x0b, 0x27, 0x44, 0x6d, 0x5d, 0xbd, 0x05, 0x55, 0x03, 0x8c, 0x96, 0xa1, -- 0x70, 0x87, 0x4e, 0x1a, 0xd6, 0x9a, 0x75, 0xb2, 0x42, 0xf8, 0x10, 0x9d, 0x85, 0xe2, 0xdb, 0x9c, -- 0x4c, 0x23, 0xbf, 0x66, 0x9d, 0xac, 0x9e, 0x3b, 0x9e, 0x1c, 0xf2, 0x9a, 0xef, 0xdd, 0x1d, 0x53, -- 0xb1, 0x5b, 0x1d, 0x24, 0x31, 0x2f, 0xe4, 0xcf, 0x5b, 0xf8, 0x14, 0x1c, 0xde, 0xb7, 0x8e, 0x8e, -- 0x42, 0x49, 0x60, 0xc8, 0x1b, 0x57, 0x88, 0x9a, 0xe1, 0x3a, 0xa0, 0x6d, 0x16, 0x52, 0x77, 0x48, -- 0x5c, 0xc6, 0xef, 0x7b, 0x77, 0x4c, 0x23, 0x86, 0x5f, 0x81, 0x95, 0x14, 0x54, 0xb1, 0xfd, 0x0c, -- 0x54, 0xa3, 0x04, 0xac, 0x78, 0xaf, 0x27, 0xd7, 0x4a, 0xf6, 0x10, 0x13, 0x11, 0xff, 0xd4, 0x02, -- 0x48, 0xd6, 0x50, 0x13, 0x40, 0xae, 0xbe, 0xe4, 0x46, 0x7d, 0xc1, 0xb0, 0x4d, 0x0c, 0x08, 0x3a, -- 0x0d, 0x87, 0x93, 0xd9, 0xb5, 0x60, 0xbb, 0xef, 0x86, 0x5d, 0x21, 0x03, 0x9b, 0xec, 0x5f, 0x40, -- 0x08, 0xec, 0xd0, 0x65, 0xb4, 0x51, 0x58, 0xb3, 0x4e, 0x16, 0x88, 0x18, 0x73, 0x6e, 0x19, 0xf5, -- 0x5d, 0x9f, 0x35, 0x6c, 0x21, 0x4e, 0x35, 0xe3, 0x70, 0x6e, 0x11, 0x34, 0x6a, 0x14, 0xd7, 0xac, -- 0x93, 0x8b, 0x44, 0xcd, 0xf0, 0x3f, 0x0a, 0x50, 0x7b, 0x75, 0x4c, 0xc3, 0x89, 0x12, 0x00, 0x6a, -- 0x42, 0x39, 0xa2, 0x03, 0xda, 0x61, 0x41, 0x28, 0x35, 0xd2, 0xce, 0x37, 0x2c, 0x12, 0xc3, 0x50, -- 0x1d, 0x8a, 0x03, 0x6f, 0xe8, 0x31, 0x71, 0xad, 0x45, 0x22, 0x27, 0xe8, 0x02, 0x14, 0x23, 0xe6, -- 0x86, 0x4c, 0xdc, 0xa5, 0x7a, 0x6e, 0x75, 0x5d, 0x9a, 0xf2, 0xba, 0x36, 0xe5, 0xf5, 0x9b, 0xda, -- 0x94, 0xdb, 0xe5, 0xf7, 0xa7, 0x4e, 0xee, 0xdd, 
0x3f, 0x3b, 0x16, 0x91, 0x5b, 0xd0, 0x33, 0x50, -- 0xa0, 0x7e, 0x57, 0xdc, 0xf7, 0xf3, 0xee, 0xe4, 0x1b, 0xd0, 0x59, 0xa8, 0x74, 0xbd, 0x90, 0x76, -- 0x98, 0x17, 0xf8, 0x82, 0xab, 0xa5, 0x73, 0x2b, 0x89, 0x46, 0x36, 0xf5, 0x12, 0x49, 0xb0, 0xd0, -- 0x69, 0x28, 0x45, 0x5c, 0x74, 0x51, 0x63, 0x81, 0xdb, 0x42, 0xbb, 0xbe, 0x37, 0x75, 0x96, 0x25, -- 0xe4, 0x74, 0x30, 0xf4, 0x18, 0x1d, 0x8e, 0xd8, 0x84, 0x28, 0x1c, 0xf4, 0x04, 0x2c, 0x74, 0xe9, -- 0x80, 0x72, 0x85, 0x97, 0x85, 0xc2, 0x97, 0x0d, 0xf2, 0x62, 0x81, 0x68, 0x04, 0xf4, 0x26, 0xd8, -- 0xa3, 0x81, 0xeb, 0x37, 0x2a, 0x82, 0x8b, 0xa5, 0x04, 0xf1, 0xc6, 0xc0, 0xf5, 0xdb, 0xcf, 0x7e, -- 0x34, 0x75, 0x9e, 0xee, 0x79, 0xac, 0x3f, 0xbe, 0xbd, 0xde, 0x09, 0x86, 0xad, 0x5e, 0xe8, 0xee, -- 0xb8, 0xbe, 0xdb, 0x1a, 0x04, 0x77, 0xbc, 0xd6, 0xdb, 0x4f, 0xb5, 0xf8, 0x03, 0xbd, 0x3b, 0xa6, -- 0xa1, 0x47, 0xc3, 0x16, 0x27, 0xb3, 0x2e, 0x54, 0xc2, 0xb7, 0x12, 0x41, 0x16, 0x5d, 0xe5, 0xf6, -- 0x17, 0x84, 0x74, 0x83, 0xbf, 0xde, 0xa8, 0x01, 0xe2, 0x94, 0x63, 0xc9, 0x29, 0x02, 0x4e, 0xe8, -- 0xce, 0xe5, 0x30, 0x18, 0x8f, 0xda, 0x87, 0xf6, 0xa6, 0x8e, 0x89, 0x4f, 0xcc, 0xc9, 0x55, 0xbb, -- 0x5c, 0x5a, 0x5e, 0xc0, 0xef, 0x15, 0x00, 0x6d, 0xbb, 0xc3, 0xd1, 0x80, 0xce, 0xa5, 0xfe, 0x58, -- 0xd1, 0xf9, 0x07, 0x56, 0x74, 0x61, 0x5e, 0x45, 0x27, 0x5a, 0xb3, 0xe7, 0xd3, 0x5a, 0xf1, 0xf3, -- 0x6a, 0xad, 0xf4, 0xa5, 0xd7, 0x1a, 0x6e, 0x80, 0xcd, 0x29, 0x73, 0x67, 0x19, 0xba, 0xf7, 0x84, -- 0x6e, 0x6a, 0x84, 0x0f, 0xf1, 0x16, 0x94, 0x24, 0x5f, 0x68, 0x35, 0xab, 0xbc, 0xf4, 0xbb, 0x4d, -- 0x14, 0x57, 0xd0, 0x2a, 0x59, 0x4e, 0x54, 0x52, 0x10, 0xc2, 0xc6, 0xbf, 0xb5, 0x60, 0x51, 0x59, -- 0x84, 0xf2, 0x7d, 0xb7, 0x61, 0x41, 0xfa, 0x1e, 0xed, 0xf7, 0x8e, 0x65, 0xfd, 0xde, 0xc5, 0xae, -- 0x3b, 0x62, 0x34, 0x6c, 0xb7, 0xde, 0x9f, 0x3a, 0xd6, 0x47, 0x53, 0xe7, 0xf1, 0x83, 0x84, 0xa6, -- 0xa3, 0x93, 0xf6, 0x97, 0x9a, 0x30, 0x3a, 0x25, 0x6e, 0xc7, 0x22, 0x65, 0x56, 0x87, 0xd6, 0x65, -- 0x50, 0xbb, 0xe2, 0xf7, 0x68, 0xc4, 0x29, 0xdb, 0xdc, 0x22, 0x88, 0xc4, 0xe1, 0x6c, 0xde, 0x73, -- 0x43, 0xdf, 0xf3, 0x7b, 0x51, 0xa3, 0x20, 0x7c, 0x7a, 0x3c, 0xc7, 0x3f, 0xb6, 0x60, 0x25, 0x65, -- 0xd6, 0x8a, 0x89, 0xf3, 0x50, 0x8a, 0xb8, 0xa6, 0x34, 0x0f, 0x86, 0x51, 0x6c, 0x0b, 0x78, 0x7b, -- 0x49, 0x5d, 0xbe, 0x24, 0xe7, 0x44, 0xe1, 0x3f, 0xbc, 0xab, 0xfd, 0xc1, 0x82, 0x9a, 0x08, 0x4c, -+ // 2764 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3a, 0xcd, 0x6f, 0x1b, 0xc7, -+ 0xf5, 0x5c, 0x72, 0x49, 0x91, 0x8f, 0x94, 0x2c, 0x8f, 0x68, 0x9b, 0x90, 0x1d, 0xae, 0x32, 0xf8, -+ 0xfd, 0x12, 0x27, 0x76, 0x44, 0xdb, 0x69, 0xd2, 0xc4, 0x69, 0x9a, 0x9a, 0x52, 0xec, 0xd8, 0x51, -+ 0x6c, 0x67, 0xe4, 0x38, 0x69, 0xd1, 0x20, 0x58, 0x93, 0x23, 0x72, 0x61, 0x72, 0x97, 0xde, 0x1d, -+ 0xc6, 0xe1, 0xad, 0xff, 0x40, 0xd1, 0x00, 0x45, 0xd1, 0xf6, 0x52, 0xa0, 0x40, 0x81, 0x16, 0x05, -+ 0x72, 0x29, 0x7a, 0xe8, 0xa1, 0x68, 0x2f, 0x05, 0x9a, 0xde, 0x72, 0x0c, 0x72, 0x60, 0x1b, 0xe5, -+ 0x52, 0x08, 0x28, 0x90, 0x53, 0x0b, 0xe4, 0x54, 0xcc, 0xd7, 0xee, 0xec, 0x8a, 0xaa, 0x43, 0xd7, -+ 0x45, 0x92, 0x0b, 0x39, 0xf3, 0xe6, 0xcd, 0x9b, 0x79, 0x1f, 0xf3, 0xbe, 0x48, 0x38, 0x3e, 0xba, -+ 0xdd, 0x6b, 0x0d, 0x82, 0xde, 0x28, 0x0c, 0x58, 0x10, 0x0f, 0xd6, 0xc5, 0x27, 0x2a, 0xeb, 0xf9, -+ 0x6a, 0xbd, 0x17, 0xf4, 0x02, 0x89, 0xc3, 0x47, 0x72, 0x7d, 0xd5, 0xe9, 0x05, 0x41, 0x6f, 0x40, -+ 0x5b, 0x62, 0x76, 0x6b, 0xbc, 0xd3, 0x62, 0xde, 0x90, 0x46, 0xcc, 0x1d, 0x8e, 0x14, 0xc2, 0x9a, -+ 0xa2, 0x7e, 0x67, 0x30, 0x0c, 0xba, 0x74, 0xd0, 0x8a, 0x98, 0xcb, 0x22, 0xf9, 0xa9, 
0x30, 0x56, -+ 0x38, 0xc6, 0x68, 0x1c, 0xf5, 0xc5, 0x87, 0x02, 0x9e, 0xe1, 0xc0, 0x88, 0x05, 0xa1, 0xdb, 0xa3, -+ 0xad, 0x4e, 0x7f, 0xec, 0xdf, 0x6e, 0x75, 0xdc, 0x4e, 0x9f, 0xb6, 0x42, 0x1a, 0x8d, 0x07, 0x2c, -+ 0x92, 0x13, 0x36, 0x19, 0x51, 0x45, 0x06, 0xff, 0xd6, 0x82, 0x23, 0x5b, 0xee, 0x2d, 0x3a, 0xb8, -+ 0x11, 0xdc, 0x74, 0x07, 0x63, 0x1a, 0x11, 0x1a, 0x8d, 0x02, 0x3f, 0xa2, 0x68, 0x03, 0x4a, 0x03, -+ 0xbe, 0x10, 0x35, 0xac, 0xb5, 0xc2, 0xc9, 0xea, 0xb9, 0x53, 0xeb, 0x31, 0x93, 0x33, 0x37, 0x48, -+ 0x68, 0xf4, 0xa2, 0xcf, 0xc2, 0x09, 0x51, 0x5b, 0x57, 0x6f, 0x42, 0xd5, 0x00, 0xa3, 0x65, 0x28, -+ 0xdc, 0xa6, 0x93, 0x86, 0xb5, 0x66, 0x9d, 0xac, 0x10, 0x3e, 0x44, 0x67, 0xa1, 0xf8, 0x36, 0x27, -+ 0xd3, 0xc8, 0xaf, 0x59, 0x27, 0xab, 0xe7, 0x8e, 0x27, 0x87, 0xbc, 0xe6, 0x7b, 0x77, 0xc6, 0x54, -+ 0xec, 0x56, 0x07, 0x49, 0xcc, 0xf3, 0xf9, 0x67, 0x2c, 0x7c, 0x0a, 0x0e, 0xef, 0x5b, 0x47, 0x47, -+ 0xa1, 0x24, 0x30, 0xe4, 0x8d, 0x2b, 0x44, 0xcd, 0x70, 0x1d, 0xd0, 0x36, 0x0b, 0xa9, 0x3b, 0x24, -+ 0x2e, 0xe3, 0xf7, 0xbd, 0x33, 0xa6, 0x11, 0xc3, 0xaf, 0xc0, 0x4a, 0x0a, 0xaa, 0xd8, 0x7e, 0x1a, -+ 0xaa, 0x51, 0x02, 0x56, 0xbc, 0xd7, 0x93, 0x6b, 0x25, 0x7b, 0x88, 0x89, 0x88, 0x7f, 0x66, 0x01, -+ 0x24, 0x6b, 0xa8, 0x09, 0x20, 0x57, 0x5f, 0x72, 0xa3, 0xbe, 0x60, 0xd8, 0x26, 0x06, 0x04, 0x9d, -+ 0x86, 0xc3, 0xc9, 0xec, 0x6a, 0xb0, 0xdd, 0x77, 0xc3, 0xae, 0x90, 0x81, 0x4d, 0xf6, 0x2f, 0x20, -+ 0x04, 0x76, 0xe8, 0x32, 0xda, 0x28, 0xac, 0x59, 0x27, 0x0b, 0x44, 0x8c, 0x39, 0xb7, 0x8c, 0xfa, -+ 0xae, 0xcf, 0x1a, 0xb6, 0x10, 0xa7, 0x9a, 0x71, 0x38, 0xb7, 0x08, 0x1a, 0x35, 0x8a, 0x6b, 0xd6, -+ 0xc9, 0x45, 0xa2, 0x66, 0xf8, 0x9f, 0x05, 0xa8, 0xbd, 0x3a, 0xa6, 0xe1, 0x44, 0x09, 0x00, 0x35, -+ 0xa1, 0x1c, 0xd1, 0x01, 0xed, 0xb0, 0x20, 0x94, 0x1a, 0x69, 0xe7, 0x1b, 0x16, 0x89, 0x61, 0xa8, -+ 0x0e, 0xc5, 0x81, 0x37, 0xf4, 0x98, 0xb8, 0xd6, 0x22, 0x91, 0x13, 0x74, 0x1e, 0x8a, 0x11, 0x73, -+ 0x43, 0x26, 0xee, 0x52, 0x3d, 0xb7, 0xba, 0x2e, 0x4d, 0x79, 0x5d, 0x9b, 0xf2, 0xfa, 0x0d, 0x6d, -+ 0xca, 0xed, 0xf2, 0xfb, 0x53, 0x27, 0xf7, 0xee, 0x5f, 0x1d, 0x8b, 0xc8, 0x2d, 0xe8, 0x69, 0x28, -+ 0x50, 0xbf, 0x2b, 0xee, 0xfb, 0x79, 0x77, 0xf2, 0x0d, 0xe8, 0x2c, 0x54, 0xba, 0x5e, 0x48, 0x3b, -+ 0xcc, 0x0b, 0x7c, 0xc1, 0xd5, 0xd2, 0xb9, 0x95, 0x44, 0x23, 0x9b, 0x7a, 0x89, 0x24, 0x58, 0xe8, -+ 0x34, 0x94, 0x22, 0x2e, 0xba, 0xa8, 0xb1, 0xc0, 0x6d, 0xa1, 0x5d, 0xdf, 0x9b, 0x3a, 0xcb, 0x12, -+ 0x72, 0x3a, 0x18, 0x7a, 0x8c, 0x0e, 0x47, 0x6c, 0x42, 0x14, 0x0e, 0x7a, 0x1c, 0x16, 0xba, 0x74, -+ 0x40, 0xb9, 0xc2, 0xcb, 0x42, 0xe1, 0xcb, 0x06, 0x79, 0xb1, 0x40, 0x34, 0x02, 0x7a, 0x13, 0xec, -+ 0xd1, 0xc0, 0xf5, 0x1b, 0x15, 0xc1, 0xc5, 0x52, 0x82, 0x78, 0x7d, 0xe0, 0xfa, 0xed, 0x67, 0x3f, -+ 0x9a, 0x3a, 0x4f, 0xf5, 0x3c, 0xd6, 0x1f, 0xdf, 0x5a, 0xef, 0x04, 0xc3, 0x56, 0x2f, 0x74, 0x77, -+ 0x5c, 0xdf, 0x6d, 0x0d, 0x82, 0xdb, 0x5e, 0xeb, 0xed, 0x27, 0x5b, 0xfc, 0x81, 0xde, 0x19, 0xd3, -+ 0xd0, 0xa3, 0x61, 0x8b, 0x93, 0x59, 0x17, 0x2a, 0xe1, 0x5b, 0x89, 0x20, 0x8b, 0xae, 0x70, 0xfb, -+ 0x0b, 0x42, 0xba, 0xc1, 0x5f, 0x6f, 0xd4, 0x00, 0x71, 0xca, 0xb1, 0xe4, 0x14, 0x01, 0x27, 0x74, -+ 0xe7, 0x52, 0x18, 0x8c, 0x47, 0xed, 0x43, 0x7b, 0x53, 0xc7, 0xc4, 0x27, 0xe6, 0xe4, 0x8a, 0x5d, -+ 0x2e, 0x2d, 0x2f, 0xe0, 0xf7, 0x0a, 0x80, 0xb6, 0xdd, 0xe1, 0x68, 0x40, 0xe7, 0x52, 0x7f, 0xac, -+ 0xe8, 0xfc, 0x7d, 0x2b, 0xba, 0x30, 0xaf, 0xa2, 0x13, 0xad, 0xd9, 0xf3, 0x69, 0xad, 0xf8, 0x79, -+ 0xb5, 0x56, 0xfa, 0xd2, 0x6b, 0x0d, 0x37, 0xc0, 0xe6, 0x94, 0xb9, 0xb3, 0x0c, 0xdd, 0xbb, 0x42, -+ 0x37, 0x35, 0xc2, 0x87, 0x78, 0x0b, 0x4a, 0x92, 0x2f, 0xb4, 0x9a, 0x55, 
0x5e, 0xfa, 0xdd, 0x26, -+ 0x8a, 0x2b, 0x68, 0x95, 0x2c, 0x27, 0x2a, 0x29, 0x08, 0x61, 0xe3, 0xdf, 0x5b, 0xb0, 0xa8, 0x2c, -+ 0x42, 0xf9, 0xbe, 0x5b, 0xb0, 0x20, 0x7d, 0x8f, 0xf6, 0x7b, 0xc7, 0xb2, 0x7e, 0xef, 0x42, 0xd7, -+ 0x1d, 0x31, 0x1a, 0xb6, 0x5b, 0xef, 0x4f, 0x1d, 0xeb, 0xa3, 0xa9, 0xf3, 0xe8, 0x41, 0x42, 0xd3, -+ 0xd1, 0x49, 0xfb, 0x4b, 0x4d, 0x18, 0x9d, 0x12, 0xb7, 0x63, 0x91, 0x32, 0xab, 0x43, 0xeb, 0x32, -+ 0xa8, 0x5d, 0xf6, 0x7b, 0x34, 0xe2, 0x94, 0x6d, 0x6e, 0x11, 0x44, 0xe2, 0x70, 0x36, 0xef, 0xba, -+ 0xa1, 0xef, 0xf9, 0xbd, 0xa8, 0x51, 0x10, 0x3e, 0x3d, 0x9e, 0xe3, 0x9f, 0x58, 0xb0, 0x92, 0x32, -+ 0x6b, 0xc5, 0xc4, 0x33, 0x50, 0x8a, 0xb8, 0xa6, 0x34, 0x0f, 0x86, 0x51, 0x6c, 0x0b, 0x78, 0x7b, -+ 0x49, 0x5d, 0xbe, 0x24, 0xe7, 0x44, 0xe1, 0x3f, 0xb8, 0xab, 0xfd, 0xc9, 0x82, 0x9a, 0x08, 0x4c, - 0xfa, 0xad, 0x21, 0xb0, 0x7d, 0x77, 0x48, 0x95, 0xaa, 0xc4, 0xd8, 0x88, 0x56, 0xfc, 0xb8, 0xb2, -- 0x8e, 0x56, 0xf3, 0x3a, 0x58, 0xeb, 0x81, 0x1d, 0xac, 0x95, 0xbc, 0xbb, 0x3a, 0x14, 0xb9, 0x79, -- 0x4f, 0x84, 0x73, 0xad, 0x10, 0x39, 0xc1, 0x8f, 0xc3, 0xa2, 0xe2, 0x42, 0x89, 0xf6, 0xa0, 0x00, -- 0x3b, 0x84, 0x92, 0xd4, 0x04, 0xfa, 0x1f, 0xa8, 0xc4, 0xa9, 0x8c, 0xe0, 0xb6, 0xd0, 0x2e, 0xed, -+ 0x8e, 0x56, 0xf3, 0x3a, 0x58, 0xeb, 0xbe, 0x1d, 0xac, 0x95, 0xbc, 0xbb, 0x3a, 0x14, 0xb9, 0x79, -+ 0x4f, 0x84, 0x73, 0xad, 0x10, 0x39, 0xc1, 0x8f, 0xc2, 0xa2, 0xe2, 0x42, 0x89, 0xf6, 0xa0, 0x00, -+ 0x3b, 0x84, 0x92, 0xd4, 0x04, 0xfa, 0x3f, 0xa8, 0xc4, 0xa9, 0x8c, 0xe0, 0xb6, 0xd0, 0x2e, 0xed, - 0x4d, 0x9d, 0x3c, 0x8b, 0x48, 0xb2, 0x80, 0x1c, 0x33, 0xe8, 0x5b, 0xed, 0xca, 0xde, 0xd4, 0x91, - 0x00, 0x15, 0xe2, 0xd1, 0x09, 0xb0, 0xfb, 0x3c, 0x6e, 0x72, 0x11, 0xd8, 0xed, 0xf2, 0xde, 0xd4, -- 0x11, 0x73, 0x22, 0x3e, 0xf1, 0x65, 0xa8, 0x6d, 0xd1, 0x9e, 0xdb, 0x99, 0xa8, 0x43, 0xeb, 0x9a, -- 0x1c, 0x3f, 0xd0, 0xd2, 0x34, 0x1e, 0x85, 0x5a, 0x7c, 0xe2, 0x5b, 0xc3, 0x48, 0xbd, 0x86, 0x6a, -- 0x0c, 0x7b, 0x25, 0xc2, 0x3f, 0xb1, 0x40, 0xd9, 0x00, 0xc2, 0x46, 0xb6, 0xc3, 0x7d, 0x21, 0xec, -+ 0x11, 0x73, 0x22, 0x3e, 0xf1, 0x25, 0xa8, 0x6d, 0xd1, 0x9e, 0xdb, 0x99, 0xa8, 0x43, 0xeb, 0x9a, -+ 0x1c, 0x3f, 0xd0, 0xd2, 0x34, 0x1e, 0x86, 0x5a, 0x7c, 0xe2, 0x5b, 0xc3, 0x48, 0xbd, 0x86, 0x6a, -+ 0x0c, 0x7b, 0x25, 0xc2, 0x3f, 0xb5, 0x40, 0xd9, 0x00, 0xc2, 0x46, 0xb6, 0xc3, 0x7d, 0x21, 0xec, - 0x4d, 0x1d, 0x05, 0xd1, 0xc9, 0x0c, 0x7a, 0x0e, 0x16, 0x22, 0x71, 0x22, 0x27, 0x96, 0x35, 0x2d, - 0xb1, 0xd0, 0x3e, 0xc4, 0x4d, 0x64, 0x6f, 0xea, 0x68, 0x44, 0xa2, 0x07, 0x68, 0x3d, 0x95, 0x10, -- 0x48, 0xc6, 0x96, 0xf6, 0xa6, 0x8e, 0x01, 0x35, 0x13, 0x04, 0xfc, 0x99, 0x05, 0xd5, 0x9b, 0xae, -+ 0x48, 0xc6, 0x96, 0xf6, 0xa6, 0x8e, 0x01, 0x35, 0x13, 0x04, 0xfc, 0x99, 0x05, 0xd5, 0x1b, 0xae, - 0x17, 0x9b, 0x50, 0x43, 0xab, 0x28, 0xf1, 0xd5, 0x12, 0xc0, 0x2d, 0xb1, 0x4b, 0x07, 0xee, 0xe4, -- 0x52, 0x10, 0x0a, 0xba, 0x8b, 0x24, 0x9e, 0x27, 0x31, 0xdc, 0x9e, 0x19, 0xc3, 0x8b, 0xf3, 0xbb, -- 0xf6, 0xff, 0xae, 0x23, 0xbd, 0x6a, 0x97, 0xf3, 0xcb, 0x05, 0xfc, 0x9e, 0x05, 0x35, 0xc9, 0xbc, -- 0xb2, 0xbc, 0xef, 0x40, 0x49, 0xca, 0x46, 0xb0, 0xff, 0x6f, 0x1c, 0xd3, 0xa9, 0x79, 0x9c, 0x92, -+ 0x62, 0x10, 0x0a, 0xba, 0x8b, 0x24, 0x9e, 0x27, 0x31, 0xdc, 0x9e, 0x19, 0xc3, 0x8b, 0xf3, 0xbb, -+ 0xf6, 0xff, 0xad, 0x23, 0xbd, 0x62, 0x97, 0xf3, 0xcb, 0x05, 0xfc, 0x9e, 0x05, 0x35, 0xc9, 0xbc, -+ 0xb2, 0xbc, 0xef, 0x42, 0x49, 0xca, 0x46, 0xb0, 0xff, 0x1f, 0x1c, 0xd3, 0xa9, 0x79, 0x9c, 0x92, - 0xa2, 0x89, 0x5e, 0x80, 0xa5, 0x6e, 0x18, 0x8c, 0x46, 0xb4, 0xbb, 0xad, 0xdc, 0x5f, 0x3e, 0xeb, -- 0xfe, 0x36, 0xcd, 0x75, 0x92, 0x41, 0xc7, 0x7f, 0xb2, 0x60, 0x51, 
0x39, 0x13, 0xa5, 0xae, 0x58, -- 0xc4, 0xd6, 0x03, 0x47, 0xcf, 0xfc, 0xbc, 0xd1, 0xf3, 0x28, 0x94, 0x7a, 0x3c, 0xbe, 0x68, 0x87, -- 0xa4, 0x66, 0xf3, 0x45, 0x55, 0x7c, 0x15, 0x96, 0x34, 0x2b, 0x07, 0x78, 0xd4, 0xd5, 0xac, 0x47, -- 0xbd, 0xd2, 0xa5, 0x3e, 0xf3, 0x76, 0xbc, 0xd8, 0x47, 0x2a, 0x7c, 0xfc, 0x03, 0x0b, 0x96, 0xb3, -- 0x28, 0x68, 0x33, 0x53, 0x58, 0x3c, 0x76, 0x30, 0x39, 0xb3, 0xa6, 0xd0, 0xa4, 0x55, 0x65, 0xf1, -- 0xf4, 0xfd, 0x2a, 0x8b, 0xba, 0xe9, 0x64, 0x2a, 0xca, 0x2b, 0xe0, 0x1f, 0x59, 0xb0, 0x98, 0xd2, -- 0x25, 0x3a, 0x0f, 0xf6, 0x4e, 0x18, 0x0c, 0xe7, 0x52, 0x94, 0xd8, 0x81, 0xfe, 0x1f, 0xf2, 0x2c, -- 0x98, 0x4b, 0x4d, 0x79, 0x16, 0x70, 0x2d, 0x29, 0xf6, 0x0b, 0x32, 0x6f, 0x97, 0x33, 0xfc, 0x34, -- 0x54, 0x04, 0x43, 0x37, 0x5c, 0x2f, 0x9c, 0x19, 0x30, 0x66, 0x33, 0xf4, 0x1c, 0x1c, 0x92, 0xce, -+ 0xfe, 0x36, 0xcd, 0x75, 0x92, 0x41, 0xc7, 0x7f, 0xb1, 0x60, 0x51, 0x39, 0x13, 0xa5, 0xae, 0x58, -+ 0xc4, 0xd6, 0x7d, 0x47, 0xcf, 0xfc, 0xbc, 0xd1, 0xf3, 0x28, 0x94, 0x7a, 0x3c, 0xbe, 0x68, 0x87, -+ 0xa4, 0x66, 0xf3, 0x45, 0x55, 0x7c, 0x05, 0x96, 0x34, 0x2b, 0x07, 0x78, 0xd4, 0xd5, 0xac, 0x47, -+ 0xbd, 0xdc, 0xa5, 0x3e, 0xf3, 0x76, 0xbc, 0xd8, 0x47, 0x2a, 0x7c, 0xfc, 0x03, 0x0b, 0x96, 0xb3, -+ 0x28, 0x68, 0x33, 0x53, 0x58, 0x3c, 0x72, 0x30, 0x39, 0xb3, 0xa6, 0xd0, 0xa4, 0x55, 0x65, 0xf1, -+ 0xd4, 0xbd, 0x2a, 0x8b, 0xba, 0xe9, 0x64, 0x2a, 0xca, 0x2b, 0xe0, 0x1f, 0x5b, 0xb0, 0x98, 0xd2, -+ 0x25, 0x7a, 0x06, 0xec, 0x9d, 0x30, 0x18, 0xce, 0xa5, 0x28, 0xb1, 0x03, 0x7d, 0x0d, 0xf2, 0x2c, -+ 0x98, 0x4b, 0x4d, 0x79, 0x16, 0x70, 0x2d, 0x29, 0xf6, 0x0b, 0x32, 0x6f, 0x97, 0x33, 0xfc, 0x14, -+ 0x54, 0x04, 0x43, 0xd7, 0x5d, 0x2f, 0x9c, 0x19, 0x30, 0x66, 0x33, 0xf4, 0x1c, 0x1c, 0x92, 0xce, - 0x70, 0xf6, 0xe6, 0xda, 0xac, 0xcd, 0x35, 0xbd, 0xf9, 0x38, 0x14, 0x45, 0xd2, 0xc1, 0xb7, 0x74, - 0x5d, 0xe6, 0xea, 0x2d, 0x7c, 0x8c, 0x8f, 0xc0, 0x0a, 0x7f, 0x83, 0x34, 0x8c, 0x36, 0x82, 0xb1, - 0xcf, 0x74, 0xdd, 0x74, 0x1a, 0xea, 0x69, 0xb0, 0xb2, 0x92, 0x3a, 0x14, 0x3b, 0x1c, 0x20, 0x68, -- 0x2c, 0x12, 0x39, 0xc1, 0x3f, 0xb7, 0x00, 0x5d, 0xa6, 0x4c, 0x9c, 0x72, 0x65, 0x33, 0x7e, 0x1e, -+ 0x2c, 0x12, 0x39, 0xc1, 0xbf, 0xb0, 0x00, 0x5d, 0xa2, 0x4c, 0x9c, 0x72, 0x79, 0x33, 0x7e, 0x1e, - 0xab, 0x50, 0x1e, 0xba, 0xac, 0xd3, 0xa7, 0x61, 0xa4, 0xf3, 0x17, 0x3d, 0xff, 0x22, 0x12, 0x4f, - 0x7c, 0x16, 0x56, 0x52, 0xb7, 0x54, 0x3c, 0xad, 0x42, 0xb9, 0xa3, 0x60, 0x2a, 0xe4, 0xc5, 0x73, -- 0xfc, 0xeb, 0x3c, 0x94, 0x75, 0x5a, 0x87, 0xce, 0x42, 0x75, 0xc7, 0xf3, 0x7b, 0x34, 0x1c, 0x85, -- 0x9e, 0x12, 0x81, 0x2d, 0xd3, 0x3c, 0x03, 0x4c, 0xcc, 0x09, 0x7a, 0x12, 0x16, 0xc6, 0x11, 0x0d, -- 0xdf, 0xf2, 0xe4, 0x4b, 0xaf, 0xb4, 0xeb, 0xbb, 0x53, 0xa7, 0xf4, 0x5a, 0x44, 0xc3, 0x2b, 0x9b, -+ 0xfc, 0x9b, 0x3c, 0x94, 0x75, 0x5a, 0x87, 0xce, 0x42, 0x75, 0xc7, 0xf3, 0x7b, 0x34, 0x1c, 0x85, -+ 0x9e, 0x12, 0x81, 0x2d, 0xd3, 0x3c, 0x03, 0x4c, 0xcc, 0x09, 0x7a, 0x02, 0x16, 0xc6, 0x11, 0x0d, -+ 0xdf, 0xf2, 0xe4, 0x4b, 0xaf, 0xb4, 0xeb, 0xbb, 0x53, 0xa7, 0xf4, 0x5a, 0x44, 0xc3, 0xcb, 0x9b, - 0x3c, 0xf8, 0x8c, 0xc5, 0x88, 0xc8, 0xef, 0x2e, 0x7a, 0x59, 0x99, 0xa9, 0x48, 0xe0, 0xda, 0x5f, -- 0xe3, 0xd7, 0xcf, 0xb8, 0xba, 0x51, 0x18, 0x0c, 0x29, 0xeb, 0xd3, 0x71, 0xd4, 0xea, 0x04, 0xc3, -- 0x61, 0xe0, 0xb7, 0x44, 0xef, 0x40, 0x30, 0xcd, 0x23, 0x28, 0xdf, 0xae, 0x2c, 0xf7, 0x26, 0x2c, -- 0xb0, 0x7e, 0x18, 0x8c, 0x7b, 0x7d, 0x11, 0x18, 0x0a, 0xed, 0x0b, 0xf3, 0xd3, 0xd3, 0x14, 0x88, -- 0x1e, 0xa0, 0x47, 0xb9, 0xb4, 0x68, 0xe7, 0x4e, 0x34, 0x1e, 0xca, 0xda, 0xb3, 0x5d, 0xdc, 0x9b, -- 0x3a, 0xd6, 0x93, 0x24, 0x06, 0xe3, 0x8b, 0xb0, 0x98, 0x4a, 
0x85, 0xd1, 0x19, 0xb0, 0x43, 0xba, -+ 0xe7, 0xd7, 0xcf, 0xb8, 0xba, 0x51, 0x18, 0x0c, 0x29, 0xeb, 0xd3, 0x71, 0xd4, 0xea, 0x04, 0xc3, -+ 0x61, 0xe0, 0xb7, 0x44, 0xef, 0x40, 0x30, 0xcd, 0x23, 0x28, 0xdf, 0xae, 0x2c, 0xf7, 0x06, 0x2c, -+ 0xb0, 0x7e, 0x18, 0x8c, 0x7b, 0x7d, 0x11, 0x18, 0x0a, 0xed, 0xf3, 0xf3, 0xd3, 0xd3, 0x14, 0x88, -+ 0x1e, 0xa0, 0x87, 0xb9, 0xb4, 0x68, 0xe7, 0x76, 0x34, 0x1e, 0xca, 0xda, 0xb3, 0x5d, 0xdc, 0x9b, -+ 0x3a, 0xd6, 0x13, 0x24, 0x06, 0xe3, 0x0b, 0xb0, 0x98, 0x4a, 0x85, 0xd1, 0x19, 0xb0, 0x43, 0xba, - 0xa3, 0x5d, 0x01, 0xda, 0x9f, 0x31, 0xcb, 0xe8, 0xcf, 0x71, 0x88, 0xf8, 0xc4, 0xdf, 0xcf, 0x83, -- 0x63, 0x54, 0xfd, 0x97, 0x82, 0xf0, 0x15, 0xca, 0x42, 0xaf, 0x73, 0xcd, 0x1d, 0x52, 0x6d, 0x5e, -- 0x0e, 0x54, 0x87, 0x02, 0xf8, 0x96, 0xf1, 0x8a, 0x60, 0x18, 0xe3, 0xa1, 0x47, 0x00, 0xc4, 0xb3, -+ 0x63, 0x54, 0xfd, 0x17, 0x83, 0xf0, 0x15, 0xca, 0x42, 0xaf, 0x73, 0xd5, 0x1d, 0x52, 0x6d, 0x5e, -+ 0x0e, 0x54, 0x87, 0x02, 0xf8, 0x96, 0xf1, 0x8a, 0x60, 0x18, 0xe3, 0xa1, 0x87, 0x00, 0xc4, 0xb3, - 0x93, 0xeb, 0xf2, 0x41, 0x55, 0x04, 0x44, 0x2c, 0x6f, 0xa4, 0x84, 0xdd, 0x9a, 0x53, 0x38, 0x4a, -- 0xc8, 0x57, 0xb2, 0x42, 0x9e, 0x9b, 0x4e, 0x2c, 0x59, 0xf3, 0xb9, 0x14, 0xd3, 0xcf, 0x05, 0xff, -- 0xcd, 0x82, 0xe6, 0x96, 0xbe, 0xf9, 0x03, 0x8a, 0x43, 0xf3, 0x9b, 0x7f, 0x48, 0xfc, 0x16, 0x1e, -- 0x22, 0xbf, 0x76, 0x86, 0xdf, 0x26, 0xc0, 0x96, 0xe7, 0xd3, 0x4b, 0xde, 0x80, 0xd1, 0x70, 0x46, -- 0x91, 0xf4, 0xc3, 0x42, 0xe2, 0x71, 0x08, 0xdd, 0xd1, 0x32, 0xd8, 0x30, 0xdc, 0xfc, 0xc3, 0x60, -- 0x31, 0xff, 0x10, 0x59, 0x2c, 0x64, 0x3c, 0xa0, 0x0f, 0x0b, 0x3b, 0x82, 0x3d, 0x19, 0xb1, 0x53, -- 0xfd, 0xa7, 0x84, 0xf7, 0xf6, 0x37, 0xd4, 0xe1, 0xcf, 0xdc, 0x27, 0xe1, 0x12, 0x7d, 0xc4, 0x56, -+ 0xc8, 0x97, 0xb3, 0x42, 0x9e, 0x9b, 0x4e, 0x2c, 0x59, 0xf3, 0xb9, 0x14, 0xd3, 0xcf, 0x05, 0xff, -+ 0xc3, 0x82, 0xe6, 0x96, 0xbe, 0xf9, 0x7d, 0x8a, 0x43, 0xf3, 0x9b, 0x7f, 0x40, 0xfc, 0x16, 0x1e, -+ 0x20, 0xbf, 0x76, 0x86, 0xdf, 0x26, 0xc0, 0x96, 0xe7, 0xd3, 0x8b, 0xde, 0x80, 0xd1, 0x70, 0x46, -+ 0x91, 0xf4, 0xc3, 0x42, 0xe2, 0x71, 0x08, 0xdd, 0xd1, 0x32, 0xd8, 0x30, 0xdc, 0xfc, 0x83, 0x60, -+ 0x31, 0xff, 0x00, 0x59, 0x2c, 0x64, 0x3c, 0xa0, 0x0f, 0x0b, 0x3b, 0x82, 0x3d, 0x19, 0xb1, 0x53, -+ 0xfd, 0xa7, 0x84, 0xf7, 0xf6, 0x37, 0xd5, 0xe1, 0x4f, 0xdf, 0x23, 0xe1, 0x12, 0x7d, 0xc4, 0x56, - 0x34, 0xf1, 0x99, 0xfb, 0x8e, 0xb1, 0x9f, 0xe8, 0x43, 0x90, 0xab, 0x72, 0xba, 0xe2, 0xcc, 0x9c, -- 0xee, 0x79, 0x75, 0xcc, 0x7f, 0x92, 0xd7, 0xe1, 0xe7, 0x13, 0x07, 0x2b, 0x94, 0xa2, 0x1c, 0xec, -- 0x63, 0xf7, 0x7b, 0xfe, 0xea, 0xd1, 0xff, 0xce, 0x82, 0xe5, 0xcb, 0x94, 0xa5, 0x73, 0xac, 0xaf, -- 0x90, 0x4a, 0xf1, 0x4b, 0x70, 0xd8, 0xb8, 0xbf, 0xe2, 0xfe, 0xa9, 0x4c, 0x62, 0x75, 0x24, 0xe1, -- 0xff, 0x8a, 0xdf, 0xa5, 0xef, 0xa8, 0x7a, 0x35, 0x9d, 0x53, 0xdd, 0x80, 0xaa, 0xb1, 0x88, 0x2e, -- 0x66, 0xb2, 0xa9, 0x95, 0x4c, 0x9b, 0x96, 0x67, 0x04, 0xed, 0xba, 0xe2, 0x49, 0x56, 0xa5, 0x2a, -+ 0xee, 0x79, 0x75, 0xcc, 0x7f, 0x93, 0xd7, 0xe1, 0xe7, 0x13, 0x07, 0x2b, 0x94, 0xa2, 0x1c, 0xec, -+ 0x23, 0xf7, 0x7a, 0xfe, 0xea, 0xd1, 0xff, 0xc1, 0x82, 0xe5, 0x4b, 0x94, 0xa5, 0x73, 0xac, 0xaf, -+ 0x90, 0x4a, 0xf1, 0x4b, 0x70, 0xd8, 0xb8, 0xbf, 0xe2, 0xfe, 0xc9, 0x4c, 0x62, 0x75, 0x24, 0xe1, -+ 0xff, 0xb2, 0xdf, 0xa5, 0xef, 0xa8, 0x7a, 0x35, 0x9d, 0x53, 0x5d, 0x87, 0xaa, 0xb1, 0x88, 0x2e, -+ 0x64, 0xb2, 0xa9, 0x95, 0x4c, 0x9b, 0x96, 0x67, 0x04, 0xed, 0xba, 0xe2, 0x49, 0x56, 0xa5, 0x2a, - 0x57, 0x8e, 0x33, 0x8f, 0x6d, 0x40, 0x42, 0x5d, 0x82, 0xac, 0x19, 0xfb, 0x04, 0xf4, 0xe5, 0x38, -- 0xad, 0x8a, 0xe7, 0xe8, 0x51, 0xb0, 0xc3, 0xe0, 0x9e, 
0x4e, 0x93, 0x17, 0x93, 0x23, 0x49, 0x70, -- 0x8f, 0x88, 0x25, 0xfc, 0x1c, 0x14, 0x48, 0x70, 0x0f, 0x35, 0x01, 0x42, 0xd7, 0xef, 0xd1, 0x5b, -+ 0xad, 0x8a, 0xe7, 0xe8, 0x61, 0xb0, 0xc3, 0xe0, 0xae, 0x4e, 0x93, 0x17, 0x93, 0x23, 0x49, 0x70, -+ 0x97, 0x88, 0x25, 0xfc, 0x1c, 0x14, 0x48, 0x70, 0x17, 0x35, 0x01, 0x42, 0xd7, 0xef, 0xd1, 0x9b, - 0x71, 0x81, 0x56, 0x23, 0x06, 0xe4, 0x80, 0xbc, 0x64, 0x03, 0x0e, 0x9b, 0x37, 0x92, 0xea, 0x5e, - 0x87, 0x85, 0x57, 0xc7, 0xa6, 0xb8, 0xea, 0x19, 0x71, 0xc9, 0x3e, 0x80, 0x46, 0xe2, 0x36, 0x03, -- 0x09, 0x1c, 0x9d, 0x80, 0x0a, 0x73, 0x6f, 0x0f, 0xe8, 0xb5, 0xc4, 0x05, 0x26, 0x00, 0xbe, 0xca, -- 0x6b, 0xcb, 0x5b, 0x46, 0x82, 0x95, 0x00, 0xd0, 0x13, 0xb0, 0x9c, 0xdc, 0xf9, 0x46, 0x48, 0x77, -+ 0x09, 0x1c, 0x9d, 0x80, 0x0a, 0x73, 0x6f, 0x0d, 0xe8, 0xd5, 0xc4, 0x05, 0x26, 0x00, 0xbe, 0xca, -+ 0x6b, 0xcb, 0x9b, 0x46, 0x82, 0x95, 0x00, 0xd0, 0xe3, 0xb0, 0x9c, 0xdc, 0xf9, 0x7a, 0x48, 0x77, - 0xbc, 0x77, 0x84, 0x86, 0x6b, 0x64, 0x1f, 0x1c, 0x9d, 0x84, 0x43, 0x09, 0x6c, 0x5b, 0x24, 0x32, -- 0xb6, 0x40, 0xcd, 0x82, 0xb9, 0x6c, 0x04, 0xbb, 0x2f, 0xde, 0x1d, 0xbb, 0x03, 0xf1, 0xf8, 0x6a, -- 0xc4, 0x80, 0xe0, 0xdf, 0x5b, 0x70, 0x58, 0xaa, 0x9a, 0xb9, 0xec, 0x2b, 0x69, 0xf5, 0xbf, 0xb0, -- 0x00, 0x99, 0x1c, 0x28, 0xd3, 0xfa, 0x5f, 0xb3, 0xcf, 0xc4, 0x33, 0xa5, 0xaa, 0x28, 0x99, 0x25, -+ 0xb6, 0x40, 0xcd, 0x82, 0xb9, 0x6c, 0x04, 0xbb, 0x2f, 0xde, 0x19, 0xbb, 0x03, 0xf1, 0xf8, 0x6a, -+ 0xc4, 0x80, 0xe0, 0x3f, 0x5a, 0x70, 0x58, 0xaa, 0x9a, 0xb9, 0xec, 0x2b, 0x69, 0xf5, 0xbf, 0xb4, -+ 0x00, 0x99, 0x1c, 0x28, 0xd3, 0xfa, 0x7f, 0xb3, 0xcf, 0xc4, 0x33, 0xa5, 0xaa, 0x28, 0x99, 0x25, - 0x28, 0x69, 0x15, 0x61, 0x28, 0x75, 0x64, 0x3f, 0x4d, 0x34, 0xc6, 0x65, 0x4d, 0x2e, 0x21, 0x44, -- 0x7d, 0x23, 0x07, 0x8a, 0xb7, 0x27, 0x8c, 0x46, 0xaa, 0xa2, 0x16, 0xad, 0x04, 0x01, 0x20, 0xf2, -- 0x8b, 0x9f, 0x45, 0x7d, 0x26, 0xac, 0xc6, 0x4e, 0xce, 0x52, 0x20, 0xa2, 0x07, 0xf8, 0x9f, 0x79, -- 0x58, 0xbc, 0x15, 0x0c, 0xc6, 0x49, 0xd0, 0xfc, 0x2a, 0x05, 0x8c, 0x54, 0x99, 0x5f, 0xd4, 0x65, -+ 0x7d, 0x23, 0x07, 0x8a, 0xb7, 0x26, 0x8c, 0x46, 0xaa, 0xa2, 0x16, 0xad, 0x04, 0x01, 0x20, 0xf2, -+ 0x8b, 0x9f, 0x45, 0x7d, 0x26, 0xac, 0xc6, 0x4e, 0xce, 0x52, 0x20, 0xa2, 0x07, 0xf8, 0x5f, 0x79, -+ 0x58, 0xbc, 0x19, 0x0c, 0xc6, 0x49, 0xd0, 0xfc, 0x2a, 0x05, 0x8c, 0x54, 0x99, 0x5f, 0xd4, 0x65, - 0x3e, 0x02, 0x3b, 0x62, 0x74, 0x24, 0x2c, 0xab, 0x40, 0xc4, 0x18, 0x61, 0xa8, 0x31, 0x37, 0xec, - 0x51, 0x26, 0x8b, 0xa7, 0x46, 0x49, 0x64, 0xb5, 0x29, 0x18, 0x5a, 0x83, 0xaa, 0xdb, 0xeb, 0x85, -- 0xb4, 0xe7, 0x32, 0xda, 0x9e, 0x34, 0x16, 0xc4, 0x61, 0x26, 0x08, 0x5d, 0x85, 0xa5, 0x8e, 0xdb, -- 0xe9, 0x7b, 0x7e, 0xef, 0xfa, 0x88, 0x79, 0x81, 0x1f, 0x35, 0xca, 0x22, 0x74, 0x9c, 0x58, 0x37, -+ 0xb4, 0xe7, 0x32, 0xda, 0x9e, 0x34, 0x16, 0xc4, 0x61, 0x26, 0x08, 0x5d, 0x81, 0xa5, 0x8e, 0xdb, -+ 0xe9, 0x7b, 0x7e, 0xef, 0xda, 0x88, 0x79, 0x81, 0x1f, 0x35, 0xca, 0x22, 0x74, 0x9c, 0x58, 0x37, - 0x7f, 0x68, 0x5a, 0xdf, 0x48, 0xe1, 0x28, 0x3f, 0x96, 0xd9, 0x89, 0xdf, 0x80, 0x25, 0x2d, 0x78, - 0x65, 0x1e, 0x67, 0x60, 0xe1, 0x6d, 0x01, 0x99, 0xd1, 0xc2, 0x93, 0xa8, 0x8a, 0x94, 0x46, 0x4b, -- 0xff, 0x54, 0xa1, 0xf9, 0xc7, 0x57, 0xa1, 0x24, 0xd1, 0xd1, 0x09, 0xb3, 0x9c, 0x92, 0x19, 0x25, -+ 0xff, 0x54, 0xa1, 0xf9, 0xc7, 0x57, 0xa0, 0x24, 0xd1, 0xd1, 0x09, 0xb3, 0x9c, 0x92, 0x19, 0x25, - 0x9f, 0xab, 0xda, 0x08, 0x43, 0x49, 0x12, 0x52, 0x46, 0x24, 0xec, 0x4c, 0x42, 0x88, 0xfa, 0xc6, -- 0x7f, 0xb7, 0xe0, 0xc8, 0x26, 0x65, 0xb4, 0xc3, 0x68, 0xf7, 0x92, 0x47, 0x07, 0xdd, 0x2f, 0xb4, -- 0xd2, 0x8f, 0xfb, 0x75, 0x05, 0xa3, 0x5f, 0xc7, 0x7d, 
0xd8, 0xc0, 0xf3, 0xe9, 0x96, 0xd1, 0xf0, -- 0x49, 0x00, 0xdc, 0xdb, 0xec, 0xf0, 0x8b, 0xcb, 0x65, 0xf9, 0xdb, 0x90, 0x01, 0x89, 0xad, 0xa5, -- 0x94, 0x58, 0x0b, 0xfe, 0x9e, 0x05, 0x47, 0xb3, 0x5c, 0x2b, 0x25, 0xb5, 0xa0, 0x24, 0x36, 0xcf, -- 0x68, 0x15, 0xa7, 0x76, 0x10, 0x85, 0x86, 0xce, 0xa7, 0xce, 0x17, 0xbf, 0x29, 0xb5, 0x1b, 0x7b, -- 0x53, 0xa7, 0x9e, 0x40, 0x8d, 0x6e, 0x84, 0x81, 0x8b, 0xff, 0xc8, 0x6b, 0x76, 0x93, 0xa6, 0xd0, -- 0x37, 0xb7, 0x55, 0xe5, 0xc7, 0xe5, 0x04, 0xfd, 0x1f, 0xd8, 0x6c, 0x32, 0x52, 0xee, 0xbb, 0x7d, -- 0xe4, 0xb3, 0xa9, 0x73, 0x38, 0xb5, 0xed, 0xe6, 0x64, 0x44, 0x89, 0x40, 0xe1, 0x26, 0xde, 0x71, -- 0xc3, 0xae, 0xe7, 0xbb, 0x03, 0x8f, 0x49, 0x31, 0xda, 0xc4, 0x04, 0x71, 0xbf, 0x31, 0x72, 0xc3, -- 0x48, 0xe7, 0x60, 0x15, 0xe9, 0x37, 0x14, 0x88, 0xe8, 0x81, 0xe8, 0xad, 0xdc, 0xa1, 0xac, 0xd3, -- 0x97, 0xfe, 0x5b, 0xf5, 0x56, 0x04, 0x24, 0xd5, 0x5b, 0x11, 0x10, 0xfc, 0x33, 0xc3, 0x8a, 0xe4, -- 0x63, 0xfb, 0xd2, 0x59, 0x11, 0xfe, 0x56, 0xa2, 0x72, 0x7d, 0x45, 0xa5, 0xf2, 0x17, 0x60, 0xa9, -- 0x9b, 0x5a, 0x39, 0x58, 0xf5, 0xb2, 0x6f, 0x9c, 0x41, 0xc7, 0xe3, 0x44, 0x8f, 0x02, 0x72, 0x80, -- 0x1e, 0x33, 0xca, 0xc9, 0xef, 0x57, 0x4e, 0x22, 0xf5, 0xc2, 0xfd, 0xa5, 0xfe, 0xc4, 0x63, 0x50, -- 0x89, 0x7f, 0x23, 0x44, 0x55, 0x58, 0xb8, 0x74, 0x9d, 0xbc, 0x7e, 0x91, 0x6c, 0x2e, 0xe7, 0x50, -- 0x0d, 0xca, 0xed, 0x8b, 0x1b, 0x2f, 0x8b, 0x99, 0x75, 0xee, 0x57, 0x25, 0x9d, 0x61, 0x84, 0xe8, -- 0xeb, 0x50, 0x94, 0x69, 0xc3, 0xd1, 0x84, 0x39, 0xf3, 0xe7, 0xb3, 0xd5, 0x63, 0xfb, 0xe0, 0x52, -- 0x4a, 0x38, 0x77, 0xc6, 0x42, 0xd7, 0xa0, 0x2a, 0x80, 0xaa, 0x41, 0x7d, 0x22, 0xdb, 0x27, 0x4e, -- 0x51, 0x7a, 0xe4, 0x80, 0x55, 0x83, 0xde, 0x05, 0x28, 0x4a, 0x81, 0x1d, 0xcd, 0x64, 0x77, 0x33, -- 0x6e, 0x93, 0x6a, 0xd9, 0xe3, 0x1c, 0x7a, 0x16, 0xec, 0x9b, 0xae, 0x37, 0x40, 0x46, 0x72, 0x69, -- 0xf4, 0x95, 0x57, 0x8f, 0x66, 0xc1, 0xc6, 0xb1, 0xcf, 0xc7, 0xed, 0xf1, 0x63, 0xd9, 0x1e, 0x9d, -- 0xde, 0xde, 0xd8, 0xbf, 0x10, 0x9f, 0x7c, 0x5d, 0x36, 0x71, 0x75, 0xa7, 0x08, 0x3d, 0x92, 0x3e, -- 0x2a, 0xd3, 0x58, 0x5a, 0x6d, 0x1e, 0xb4, 0x1c, 0x13, 0xdc, 0x82, 0xaa, 0xd1, 0xa5, 0x31, 0xc5, -- 0xba, 0xbf, 0xc5, 0x64, 0x8a, 0x75, 0x46, 0x6b, 0x07, 0xe7, 0xd0, 0x65, 0x28, 0xf3, 0x94, 0x5c, -- 0xfc, 0x9a, 0x73, 0x3c, 0x9b, 0x79, 0x1b, 0x19, 0xd7, 0xea, 0x89, 0xd9, 0x8b, 0x31, 0xa1, 0x6f, -- 0x42, 0xe5, 0x32, 0x65, 0x2a, 0xd4, 0x1c, 0xcb, 0xc6, 0xaa, 0x19, 0x92, 0x4a, 0xc7, 0x3b, 0x9c, -- 0x43, 0x6f, 0x88, 0xea, 0x20, 0xed, 0x69, 0x91, 0x73, 0x80, 0x47, 0x8d, 0xef, 0xb5, 0x76, 0x30, -- 0x42, 0x4c, 0xf9, 0xf5, 0x14, 0x65, 0x15, 0xe0, 0x9d, 0x03, 0x1e, 0x6c, 0x4c, 0xd9, 0xb9, 0xcf, -- 0x7f, 0x3d, 0x70, 0xee, 0xdc, 0x9b, 0xfa, 0xef, 0x0e, 0x9b, 0x2e, 0x73, 0xd1, 0x75, 0x58, 0x12, -- 0xb2, 0x8c, 0xff, 0x0f, 0x91, 0xb2, 0xf9, 0x7d, 0x7f, 0xbe, 0x48, 0xd9, 0xfc, 0xfe, 0x3f, 0x61, -- 0xe0, 0x5c, 0xfb, 0xcd, 0x0f, 0x3e, 0x6e, 0xe6, 0x3e, 0xfc, 0xb8, 0x99, 0xfb, 0xf4, 0xe3, 0xa6, -- 0xf5, 0xdd, 0xdd, 0xa6, 0xf5, 0xcb, 0xdd, 0xa6, 0xf5, 0xfe, 0x6e, 0xd3, 0xfa, 0x60, 0xb7, 0x69, -- 0xfd, 0x65, 0xb7, 0x69, 0xfd, 0x75, 0xb7, 0x99, 0xfb, 0x74, 0xb7, 0x69, 0xbd, 0xfb, 0x49, 0x33, -- 0xf7, 0xc1, 0x27, 0xcd, 0xdc, 0x87, 0x9f, 0x34, 0x73, 0xdf, 0x7e, 0xfc, 0xfe, 0x95, 0xb0, 0x74, -- 0x8b, 0x25, 0xf1, 0xf5, 0xd4, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xfc, 0x48, 0x69, 0xc6, -- 0x23, 0x00, 0x00, -+ 0x3f, 0xca, 0xc3, 0x91, 0x4d, 0xca, 0x68, 0x87, 0xd1, 0xee, 0x45, 0x8f, 0x0e, 0xba, 0x5f, 0x68, -+ 0xa5, 0x1f, 0xf7, 0xeb, 0x0a, 0x46, 0xbf, 0x8e, 0xfb, 0xb0, 0x81, 0xe7, 0xd3, 0x2d, 0xa3, 0xe1, -+ 0x93, 0x00, 0x12, 0x19, 
0x15, 0xcd, 0x56, 0x90, 0xb6, 0x91, 0x92, 0x61, 0x23, 0x49, 0x9b, 0x6f, -+ 0x21, 0xd5, 0x99, 0xd4, 0x75, 0x65, 0x39, 0x29, 0x4a, 0xf1, 0xef, 0x2c, 0x38, 0x9a, 0x95, 0x8b, -+ 0x52, 0xe3, 0x8b, 0x50, 0xda, 0x11, 0x90, 0xfd, 0xcd, 0xe4, 0xd4, 0x0e, 0xd9, 0x8f, 0x90, 0xa8, -+ 0x66, 0x3f, 0x42, 0x42, 0xd0, 0x63, 0xa9, 0x9f, 0xa1, 0xda, 0x2b, 0x7b, 0x53, 0xe7, 0x90, 0x00, -+ 0x18, 0xb8, 0x8a, 0x99, 0xd3, 0xf1, 0xc5, 0x0b, 0x49, 0xa3, 0x43, 0x42, 0x4c, 0xc2, 0xaa, 0x6b, -+ 0xf9, 0x67, 0x0b, 0x16, 0x53, 0x17, 0x11, 0x22, 0xe2, 0x4f, 0x40, 0x85, 0x07, 0x39, 0x41, 0x8f, -+ 0x81, 0xcd, 0x26, 0x23, 0x15, 0x15, 0xda, 0x47, 0x3e, 0x9b, 0x3a, 0x87, 0x53, 0xdb, 0x6e, 0x4c, -+ 0x46, 0x94, 0x08, 0x14, 0xfe, 0x72, 0x3a, 0x6e, 0xd8, 0xf5, 0x7c, 0x77, 0xe0, 0x31, 0xa9, 0x1d, -+ 0x9b, 0x98, 0x20, 0xee, 0x8e, 0x46, 0x6e, 0x18, 0xe9, 0xd4, 0xae, 0x22, 0xdd, 0x91, 0x02, 0x11, -+ 0x3d, 0x10, 0x2d, 0x9b, 0xdb, 0x94, 0x75, 0xfa, 0x32, 0x2c, 0xa8, 0x96, 0x8d, 0x80, 0xa4, 0x5a, -+ 0x36, 0x02, 0x82, 0x7f, 0x6e, 0x25, 0xc6, 0x29, 0xdf, 0xf0, 0x97, 0xce, 0x38, 0xf1, 0xb7, 0x13, -+ 0x3b, 0xd1, 0x57, 0x54, 0x76, 0xf2, 0x02, 0x2c, 0x75, 0x53, 0x2b, 0x07, 0xdb, 0x8b, 0x6c, 0x47, -+ 0x67, 0xd0, 0xf1, 0x38, 0xd1, 0xa3, 0x80, 0x1c, 0xa0, 0xc7, 0x8c, 0x72, 0xf2, 0xfb, 0x95, 0x93, -+ 0x48, 0xbd, 0x70, 0x6f, 0xa9, 0x3f, 0xfe, 0x08, 0x54, 0xe2, 0x9f, 0x1e, 0x51, 0x15, 0x16, 0x2e, -+ 0x5e, 0x23, 0xaf, 0x5f, 0x20, 0x9b, 0xcb, 0x39, 0x54, 0x83, 0x72, 0xfb, 0xc2, 0xc6, 0xcb, 0x62, -+ 0x66, 0x9d, 0xfb, 0x75, 0x49, 0x27, 0x2e, 0x21, 0xfa, 0x06, 0x14, 0x65, 0x36, 0x72, 0x34, 0x61, -+ 0xce, 0xfc, 0x55, 0x6e, 0xf5, 0xd8, 0x3e, 0xb8, 0x94, 0x12, 0xce, 0x9d, 0xb1, 0xd0, 0x55, 0xa8, -+ 0x0a, 0xa0, 0xea, 0x7b, 0x9f, 0xc8, 0xb6, 0x9f, 0x53, 0x94, 0x1e, 0x3a, 0x60, 0xd5, 0xa0, 0x77, -+ 0x1e, 0x8a, 0x52, 0x60, 0x47, 0x33, 0x49, 0xe3, 0x8c, 0xdb, 0xa4, 0x7e, 0x09, 0xc0, 0x39, 0xf4, -+ 0x2c, 0xd8, 0x37, 0x5c, 0x6f, 0x80, 0x8c, 0x9c, 0xd5, 0x68, 0x57, 0xaf, 0x1e, 0xcd, 0x82, 0x8d, -+ 0x63, 0x9f, 0x8f, 0xbb, 0xee, 0xc7, 0xb2, 0xad, 0x3f, 0xbd, 0xbd, 0xb1, 0x7f, 0x21, 0x3e, 0xf9, -+ 0x9a, 0xec, 0x0d, 0xeb, 0x06, 0x14, 0x7a, 0x28, 0x7d, 0x54, 0xa6, 0x5f, 0xb5, 0xda, 0x3c, 0x68, -+ 0x39, 0x26, 0xb8, 0x05, 0x55, 0xa3, 0xf9, 0x63, 0x8a, 0x75, 0x7f, 0xe7, 0xca, 0x14, 0xeb, 0x8c, -+ 0x8e, 0x11, 0xce, 0xa1, 0x4b, 0x50, 0xe6, 0x99, 0xbe, 0xf8, 0x91, 0xe8, 0x78, 0x36, 0xa1, 0x37, -+ 0x12, 0xb9, 0xd5, 0x13, 0xb3, 0x17, 0x63, 0x42, 0xdf, 0x82, 0xca, 0x25, 0xca, 0x54, 0x04, 0x3b, -+ 0x96, 0x0d, 0x81, 0x33, 0x24, 0x95, 0x0e, 0xa3, 0x38, 0x87, 0xde, 0x10, 0x45, 0x47, 0xda, 0x3d, -+ 0x23, 0xe7, 0x00, 0x37, 0x1c, 0xdf, 0x6b, 0xed, 0x60, 0x84, 0x98, 0xf2, 0xeb, 0x29, 0xca, 0x2a, -+ 0x6f, 0x70, 0x0e, 0x78, 0xb0, 0x31, 0x65, 0xe7, 0x1e, 0x7f, 0x21, 0xc1, 0xb9, 0x73, 0x6f, 0xea, -+ 0x7f, 0x51, 0x6c, 0xba, 0xcc, 0x45, 0xd7, 0x60, 0x49, 0xc8, 0x32, 0xfe, 0x9b, 0x45, 0xca, 0xe6, -+ 0xf7, 0xfd, 0xa7, 0x23, 0x65, 0xf3, 0xfb, 0xff, 0xdb, 0x81, 0x73, 0xed, 0x37, 0x3f, 0xf8, 0xb8, -+ 0x99, 0xfb, 0xf0, 0xe3, 0x66, 0xee, 0xd3, 0x8f, 0x9b, 0xd6, 0xf7, 0x76, 0x9b, 0xd6, 0xaf, 0x76, -+ 0x9b, 0xd6, 0xfb, 0xbb, 0x4d, 0xeb, 0x83, 0xdd, 0xa6, 0xf5, 0xb7, 0xdd, 0xa6, 0xf5, 0xf7, 0xdd, -+ 0x66, 0xee, 0xd3, 0xdd, 0xa6, 0xf5, 0xee, 0x27, 0xcd, 0xdc, 0x07, 0x9f, 0x34, 0x73, 0x1f, 0x7e, -+ 0xd2, 0xcc, 0x7d, 0xe7, 0xd1, 0x7b, 0x17, 0xd8, 0xd2, 0x2d, 0x96, 0xc4, 0xd7, 0x93, 0xff, 0x0e, -+ 0x00, 0x00, 0xff, 0xff, 0xc7, 0x66, 0x64, 0x64, 0x1d, 0x24, 0x00, 0x00, - } - - func (x Direction) String() string { -@@ -4932,12 +4957,18 @@ func (this *DetectedFieldsRequest) Equal(that interface{}) bool { - if 
this.LineLimit != that1.LineLimit { - return false - } -- if this.FieldLimit != that1.FieldLimit { -+ if this.Limit != that1.Limit { - return false - } - if this.Step != that1.Step { - return false - } -+ if this.Values != that1.Values { -+ return false -+ } -+ if this.Name != that1.Name { -+ return false -+ } - return true - } - func (this *DetectedFieldsResponse) Equal(that interface{}) bool { -@@ -4967,9 +4998,17 @@ func (this *DetectedFieldsResponse) Equal(that interface{}) bool { - return false - } - } -- if this.FieldLimit != that1.FieldLimit { -+ if this.Limit != that1.Limit { -+ return false -+ } -+ if len(this.Values) != len(that1.Values) { - return false - } -+ for i := range this.Values { -+ if this.Values[i] != that1.Values[i] { -+ return false -+ } -+ } - return true - } - func (this *DetectedField) Equal(that interface{}) bool { -@@ -5741,14 +5780,16 @@ func (this *DetectedFieldsRequest) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 10) -+ s := make([]string, 0, 12) - s = append(s, ""&logproto.DetectedFieldsRequest{"") - s = append(s, ""Start: ""+fmt.Sprintf(""%#v"", this.Start)+"",\n"") - s = append(s, ""End: ""+fmt.Sprintf(""%#v"", this.End)+"",\n"") - s = append(s, ""Query: ""+fmt.Sprintf(""%#v"", this.Query)+"",\n"") - s = append(s, ""LineLimit: ""+fmt.Sprintf(""%#v"", this.LineLimit)+"",\n"") -- s = append(s, ""FieldLimit: ""+fmt.Sprintf(""%#v"", this.FieldLimit)+"",\n"") -+ s = append(s, ""Limit: ""+fmt.Sprintf(""%#v"", this.Limit)+"",\n"") - s = append(s, ""Step: ""+fmt.Sprintf(""%#v"", this.Step)+"",\n"") -+ s = append(s, ""Values: ""+fmt.Sprintf(""%#v"", this.Values)+"",\n"") -+ s = append(s, ""Name: ""+fmt.Sprintf(""%#v"", this.Name)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } -@@ -5756,12 +5797,13 @@ func (this *DetectedFieldsResponse) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 6) -+ s := make([]string, 0, 7) - s = append(s, ""&logproto.DetectedFieldsResponse{"") - if this.Fields != nil { - s = append(s, ""Fields: ""+fmt.Sprintf(""%#v"", this.Fields)+"",\n"") - } -- s = append(s, ""FieldLimit: ""+fmt.Sprintf(""%#v"", this.FieldLimit)+"",\n"") -+ s = append(s, ""Limit: ""+fmt.Sprintf(""%#v"", this.Limit)+"",\n"") -+ s = append(s, ""Values: ""+fmt.Sprintf(""%#v"", this.Values)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } -@@ -8637,13 +8679,30 @@ func (m *DetectedFieldsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - _ = i - var l int - _ = l -+ if len(m.Name) > 0 { -+ i -= len(m.Name) -+ copy(dAtA[i:], m.Name) -+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) -+ i-- -+ dAtA[i] = 0x42 -+ } -+ if m.Values { -+ i-- -+ if m.Values { -+ dAtA[i] = 1 -+ } else { -+ dAtA[i] = 0 -+ } -+ i-- -+ dAtA[i] = 0x38 -+ } - if m.Step != 0 { - i = encodeVarintLogproto(dAtA, i, uint64(m.Step)) - i-- - dAtA[i] = 0x30 - } -- if m.FieldLimit != 0 { -- i = encodeVarintLogproto(dAtA, i, uint64(m.FieldLimit)) -+ if m.Limit != 0 { -+ i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x28 - } -@@ -8698,8 +8757,17 @@ func (m *DetectedFieldsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) - _ = i - var l int - _ = l -- if m.FieldLimit != 0 { -- i = encodeVarintLogproto(dAtA, i, uint64(m.FieldLimit)) -+ if len(m.Values) > 0 { -+ for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { -+ i -= len(m.Values[iNdEx]) -+ copy(dAtA[i:], m.Values[iNdEx]) -+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Values[iNdEx]))) -+ i-- -+ 
dAtA[i] = 0x1a -+ } -+ } -+ if m.Limit != 0 { -+ i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x10 - } -@@ -9891,12 +9959,19 @@ func (m *DetectedFieldsRequest) Size() (n int) { - if m.LineLimit != 0 { - n += 1 + sovLogproto(uint64(m.LineLimit)) - } -- if m.FieldLimit != 0 { -- n += 1 + sovLogproto(uint64(m.FieldLimit)) -+ if m.Limit != 0 { -+ n += 1 + sovLogproto(uint64(m.Limit)) - } - if m.Step != 0 { - n += 1 + sovLogproto(uint64(m.Step)) - } -+ if m.Values { -+ n += 2 -+ } -+ l = len(m.Name) -+ if l > 0 { -+ n += 1 + l + sovLogproto(uint64(l)) -+ } - return n - } - -@@ -9912,8 +9987,14 @@ func (m *DetectedFieldsResponse) Size() (n int) { - n += 1 + l + sovLogproto(uint64(l)) - } - } -- if m.FieldLimit != 0 { -- n += 1 + sovLogproto(uint64(m.FieldLimit)) -+ if m.Limit != 0 { -+ n += 1 + sovLogproto(uint64(m.Limit)) -+ } -+ if len(m.Values) > 0 { -+ for _, s := range m.Values { -+ l = len(s) -+ n += 1 + l + sovLogproto(uint64(l)) -+ } - } - return n - } -@@ -10663,8 +10744,10 @@ func (this *DetectedFieldsRequest) String() string { - `End:` + strings.Replace(strings.Replace(fmt.Sprintf(""%v"", this.End), ""Timestamp"", ""types.Timestamp"", 1), `&`, ``, 1) + `,`, - `Query:` + fmt.Sprintf(""%v"", this.Query) + `,`, - `LineLimit:` + fmt.Sprintf(""%v"", this.LineLimit) + `,`, -- `FieldLimit:` + fmt.Sprintf(""%v"", this.FieldLimit) + `,`, -+ `Limit:` + fmt.Sprintf(""%v"", this.Limit) + `,`, - `Step:` + fmt.Sprintf(""%v"", this.Step) + `,`, -+ `Values:` + fmt.Sprintf(""%v"", this.Values) + `,`, -+ `Name:` + fmt.Sprintf(""%v"", this.Name) + `,`, - `}`, - }, """") - return s -@@ -10680,7 +10763,8 @@ func (this *DetectedFieldsResponse) String() string { - repeatedStringForFields += ""}"" - s := strings.Join([]string{`&DetectedFieldsResponse{`, - `Fields:` + repeatedStringForFields + `,`, -- `FieldLimit:` + fmt.Sprintf(""%v"", this.FieldLimit) + `,`, -+ `Limit:` + fmt.Sprintf(""%v"", this.Limit) + `,`, -+ `Values:` + fmt.Sprintf(""%v"", this.Values) + `,`, - `}`, - }, """") - return s -@@ -17352,9 +17436,9 @@ func (m *DetectedFieldsRequest) Unmarshal(dAtA []byte) error { - } - case 5: - if wireType != 0 { -- return fmt.Errorf(""proto: wrong wireType = %d for field FieldLimit"", wireType) -+ return fmt.Errorf(""proto: wrong wireType = %d for field Limit"", wireType) - } -- m.FieldLimit = 0 -+ m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto -@@ -17364,7 +17448,7 @@ func (m *DetectedFieldsRequest) Unmarshal(dAtA []byte) error { - } - b := dAtA[iNdEx] - iNdEx++ -- m.FieldLimit |= uint32(b&0x7F) << shift -+ m.Limit |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } -@@ -17388,6 +17472,58 @@ func (m *DetectedFieldsRequest) Unmarshal(dAtA []byte) error { - break - } - } -+ case 7: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Values"", wireType) -+ } -+ var v int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowLogproto -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ v |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ m.Values = bool(v != 0) -+ case 8: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Name"", wireType) -+ } -+ var stringLen uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowLogproto -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ stringLen |= uint64(b&0x7F) << shift 
-+ if b < 0x80 { -+ break -+ } -+ } -+ intStringLen := int(stringLen) -+ if intStringLen < 0 { -+ return ErrInvalidLengthLogproto -+ } -+ postIndex := iNdEx + intStringLen -+ if postIndex < 0 { -+ return ErrInvalidLengthLogproto -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Name = string(dAtA[iNdEx:postIndex]) -+ iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogproto(dAtA[iNdEx:]) -@@ -17477,9 +17613,28 @@ func (m *DetectedFieldsResponse) Unmarshal(dAtA []byte) error { - iNdEx = postIndex - case 2: - if wireType != 0 { -- return fmt.Errorf(""proto: wrong wireType = %d for field FieldLimit"", wireType) -+ return fmt.Errorf(""proto: wrong wireType = %d for field Limit"", wireType) -+ } -+ m.Limit = 0 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowLogproto -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ m.Limit |= uint32(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ case 3: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Values"", wireType) - } -- m.FieldLimit = 0 -+ var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogproto -@@ -17489,11 +17644,24 @@ func (m *DetectedFieldsResponse) Unmarshal(dAtA []byte) error { - } - b := dAtA[iNdEx] - iNdEx++ -- m.FieldLimit |= uint32(b&0x7F) << shift -+ stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } -+ intStringLen := int(stringLen) -+ if intStringLen < 0 { -+ return ErrInvalidLengthLogproto -+ } -+ postIndex := iNdEx + intStringLen -+ if postIndex < 0 { -+ return ErrInvalidLengthLogproto -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) -+ iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogproto(dAtA[iNdEx:]) -diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto -index 18f9d6c01adb6..5bf2d37e2d6bf 100644 ---- a/pkg/logproto/logproto.proto -+++ b/pkg/logproto/logproto.proto -@@ -459,13 +459,16 @@ message DetectedFieldsRequest { - ]; - string query = 3; // Naming this query instead of match because this should be with queryrangebase.Request interface - uint32 lineLimit = 4; -- uint32 fieldLimit = 5; -+ uint32 limit = 5; - int64 step = 6; -+ bool values = 7; // True to fetch detected field values, false for fetch detected field label names. -+ string name = 8; // Name of the detected field to fetch values for. 
- } - - message DetectedFieldsResponse { -- repeated DetectedField fields = 1; -- uint32 fieldLimit = 2 [(gogoproto.jsontag) = ""fieldLimit,omitempty""]; -+ repeated DetectedField fields = 1 [(gogoproto.jsontag) = ""fields,omitempty""]; -+ uint32 limit = 2 [(gogoproto.jsontag) = ""limit,omitempty""]; -+ repeated string values = 3 [(gogoproto.jsontag) = ""values,omitempty""]; - } - - // TODO: make the detected field include the serialized sketch -diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index 1a72cb3c5b91e..4c718722a509f 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -1177,6 +1177,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { - t.Server.HTTP.Path(""/loki/api/v1/patterns"").Methods(""GET"", ""POST"").Handler(frontendHandler) - t.Server.HTTP.Path(""/loki/api/v1/detected_labels"").Methods(""GET"", ""POST"").Handler(frontendHandler) - t.Server.HTTP.Path(""/loki/api/v1/detected_fields"").Methods(""GET"", ""POST"").Handler(frontendHandler) -+ t.Server.HTTP.Path(""/loki/api/v1/detected_field/{name}/values"").Methods(""GET"", ""POST"").Handler(frontendHandler) - t.Server.HTTP.Path(""/loki/api/v1/index/stats"").Methods(""GET"", ""POST"").Handler(frontendHandler) - t.Server.HTTP.Path(""/loki/api/v1/index/shards"").Methods(""GET"", ""POST"").Handler(frontendHandler) - t.Server.HTTP.Path(""/loki/api/v1/index/volume"").Methods(""GET"", ""POST"").Handler(frontendHandler) -diff --git a/pkg/lokifrontend/frontend/v1/frontend_test.go b/pkg/lokifrontend/frontend/v1/frontend_test.go -index b7e4061d804a0..00235af5ce99c 100644 ---- a/pkg/lokifrontend/frontend/v1/frontend_test.go -+++ b/pkg/lokifrontend/frontend/v1/frontend_test.go -@@ -40,9 +40,9 @@ import ( - ) - - const ( -- query = ""/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&step=120"" -+ query = ""/loki/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&step=120"" - responseBody = `{""status"":""success"",""data"":{""resultType"":""Matrix"",""result"":[{""metric"":{""foo"":""bar""},""values"":[[1536673680,""137""],[1536673780,""137""]]}]}}` -- labelQuery = `/api/v1/label/foo/values` -+ labelQuery = `/prom/label/foo/values` - ) - - func TestFrontend(t *testing.T) { -diff --git a/pkg/querier-rf1/http.go b/pkg/querier-rf1/http.go -index baa820a99460f..2e10e5c576348 100644 ---- a/pkg/querier-rf1/http.go -+++ b/pkg/querier-rf1/http.go -@@ -277,8 +277,8 @@ func (q *QuerierAPI) DetectedFieldsHandler(ctx context.Context, req *logproto.De - ""msg"", ""queried store for detected fields that does not support it, no response from querier.DetectedFields"", - ) - return &logproto.DetectedFieldsResponse{ -- Fields: []*logproto.DetectedField{}, -- FieldLimit: req.GetFieldLimit(), -+ Fields: []*logproto.DetectedField{}, -+ Limit: req.GetLimit(), - }, nil - } - return resp, nil -diff --git a/pkg/querier-rf1/querier.go b/pkg/querier-rf1/querier.go -index a2ff7100376ad..34149733dd041 100644 ---- a/pkg/querier-rf1/querier.go -+++ b/pkg/querier-rf1/querier.go -@@ -691,7 +691,7 @@ func (q *Rf1Querier) DetectedFields(ctx context.Context, req *logproto.DetectedF - return nil, err - } - -- detectedFields := parseDetectedFields(ctx, req.FieldLimit, streams) -+ detectedFields := parseDetectedFields(ctx, req.Limit, streams) - - fields := make([]*logproto.DetectedField, len(detectedFields)) - fieldCount := 0 -@@ -714,8 +714,8 @@ func (q *Rf1Querier) DetectedFields(ctx context.Context, req *logproto.DetectedF - 
} - - return &logproto.DetectedFieldsResponse{ -- Fields: fields, -- FieldLimit: req.GetFieldLimit(), -+ Fields: fields, -+ Limit: req.GetLimit(), - }, nil - } - -diff --git a/pkg/querier/http.go b/pkg/querier/http.go -index 862f9e2a20138..c78005ffc99a3 100644 ---- a/pkg/querier/http.go -+++ b/pkg/querier/http.go -@@ -377,8 +377,8 @@ func (q *QuerierAPI) DetectedFieldsHandler(ctx context.Context, req *logproto.De - ""msg"", ""queried store for detected fields that does not support it, no response from querier.DetectedFields"", - ) - return &logproto.DetectedFieldsResponse{ -- Fields: []*logproto.DetectedField{}, -- FieldLimit: req.GetFieldLimit(), -+ Fields: []*logproto.DetectedField{}, -+ Limit: req.GetLimit(), - }, nil - } - return resp, nil -diff --git a/pkg/querier/multi_tenant_querier.go b/pkg/querier/multi_tenant_querier.go -index 961b35a916514..86fb7ecea68ae 100644 ---- a/pkg/querier/multi_tenant_querier.go -+++ b/pkg/querier/multi_tenant_querier.go -@@ -303,8 +303,8 @@ func (q *MultiTenantQuerier) DetectedFields(ctx context.Context, req *logproto.D - ) - - return &logproto.DetectedFieldsResponse{ -- Fields: []*logproto.DetectedField{}, -- FieldLimit: req.GetFieldLimit(), -+ Fields: []*logproto.DetectedField{}, -+ Limit: req.GetLimit(), - }, nil - } - -diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go -index b6000b5479ecb..7c7f973b8c90c 100644 ---- a/pkg/querier/querier.go -+++ b/pkg/querier/querier.go -@@ -1104,7 +1104,7 @@ func (q *SingleTenantQuerier) DetectedFields(ctx context.Context, req *logproto. - return nil, err - } - -- detectedFields := parseDetectedFields(req.FieldLimit, streams) -+ detectedFields := parseDetectedFields(req.Limit, streams) - - fields := make([]*logproto.DetectedField, len(detectedFields)) - fieldCount := 0 -@@ -1130,8 +1130,8 @@ func (q *SingleTenantQuerier) DetectedFields(ctx context.Context, req *logproto. 
- } - - return &logproto.DetectedFieldsResponse{ -- Fields: fields, -- FieldLimit: req.GetFieldLimit(), -+ Fields: fields, -+ Limit: req.GetLimit(), - }, nil - } - -diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go -index 5212d9bdebb52..c957d07af6119 100644 ---- a/pkg/querier/queryrange/codec.go -+++ b/pkg/querier/queryrange/codec.go -@@ -331,7 +331,6 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer - } - - disableCacheReq := false -- - if strings.ToLower(strings.TrimSpace(r.Header.Get(cacheControlHeader))) == noCacheVal { - disableCacheReq = true - } -@@ -920,11 +919,11 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht - return req.WithContext(ctx), nil - case *DetectedFieldsRequest: - params := url.Values{ -- ""query"": []string{request.GetQuery()}, -- ""start"": []string{fmt.Sprintf(""%d"", request.Start.UnixNano())}, -- ""end"": []string{fmt.Sprintf(""%d"", request.End.UnixNano())}, -- ""line_limit"": []string{fmt.Sprintf(""%d"", request.GetLineLimit())}, -- ""field_limit"": []string{fmt.Sprintf(""%d"", request.GetFieldLimit())}, -+ ""query"": []string{request.GetQuery()}, -+ ""start"": []string{fmt.Sprintf(""%d"", request.Start.UnixNano())}, -+ ""end"": []string{fmt.Sprintf(""%d"", request.End.UnixNano())}, -+ ""line_limit"": []string{fmt.Sprintf(""%d"", request.GetLineLimit())}, -+ ""limit"": []string{fmt.Sprintf(""%d"", request.GetLimit())}, - } - - if request.Step != 0 { -@@ -932,7 +931,7 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht - } - - u := &url.URL{ -- Path: ""/loki/api/v1/detected_fields"", -+ Path: request.Path(), - RawQuery: params.Encode(), - } - req := &http.Request{ -@@ -1014,6 +1013,10 @@ func (c Codec) Path(r queryrangebase.Request) string { - case *logproto.VolumeRequest: - return ""/loki/api/v1/index/volume_range"" - case *DetectedFieldsRequest: -+ if request.Values { -+ // This request contains user-generated input in the URL, which is not safe to reflect in the route path. -+ return ""loki/api/v1/detected_field/values"" -+ } - return ""/loki/api/v1/detected_fields"" - case *logproto.QueryPatternsRequest: - return ""/loki/api/v1/patterns"" -@@ -1548,23 +1551,30 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) (queryrangebase - case *DetectedFieldsResponse: - resp0 := responses[0].(*DetectedFieldsResponse) - headers := resp0.Headers -- fieldLimit := resp0.Response.GetFieldLimit() -+ limit := resp0.Response.GetLimit() - - fields := []*logproto.DetectedField{} -+ values := []string{} - for _, r := range responses { - fields = append(fields, r.(*DetectedFieldsResponse).Response.Fields...) -+ values = append(values, r.(*DetectedFieldsResponse).Response.Values...) 
- } - -- mergedFields, err := detected.MergeFields(fields, fieldLimit) -+ mergedFields, err := detected.MergeFields(fields, limit) -+ if err != nil { -+ return nil, err -+ } - -+ mergedValues, err := detected.MergeValues(values, limit) - if err != nil { - return nil, err - } - - return &DetectedFieldsResponse{ - Response: &logproto.DetectedFieldsResponse{ -- Fields: mergedFields, -- FieldLimit: 0, -+ Fields: mergedFields, -+ Values: mergedValues, -+ Limit: limit, - }, - Headers: headers, - }, nil -@@ -2292,12 +2302,12 @@ type DetectedFieldsRequest struct { - func NewDetectedFieldsRequest(start, end time.Time, lineLimit, fieldLimit uint32, step int64, query, path string) *DetectedFieldsRequest { - return &DetectedFieldsRequest{ - DetectedFieldsRequest: logproto.DetectedFieldsRequest{ -- Start: start, -- End: end, -- Query: query, -- LineLimit: lineLimit, -- FieldLimit: fieldLimit, -- Step: step, -+ Start: start, -+ End: end, -+ Query: query, -+ LineLimit: lineLimit, -+ Limit: fieldLimit, -+ Step: step, - }, - path: path, - } -@@ -2331,8 +2341,8 @@ func (r *DetectedFieldsRequest) GetLineLimit() uint32 { - return r.LineLimit - } - --func (r *DetectedFieldsRequest) GetFieldLimit() uint32 { -- return r.FieldLimit -+func (r *DetectedFieldsRequest) GetLimit() uint32 { -+ return r.Limit - } - - func (r *DetectedFieldsRequest) Path() string { -@@ -2364,7 +2374,7 @@ func (r *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) { - otlog.String(""query"", r.GetQuery()), - otlog.Int64(""step (ms)"", r.GetStep()), - otlog.Int64(""line_limit"", int64(r.GetLineLimit())), -- otlog.Int64(""field_limit"", int64(r.GetFieldLimit())), -+ otlog.Int64(""limit"", int64(r.GetLimit())), - otlog.String(""step"", fmt.Sprintf(""%d"", r.GetStep())), - ) - } -diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go -index f5d00e263d0f6..e52bb1ec50a0a 100644 ---- a/pkg/querier/queryrange/codec_test.go -+++ b/pkg/querier/queryrange/codec_test.go -@@ -47,7 +47,6 @@ var ( - ) - - func Test_codec_EncodeDecodeRequest(t *testing.T) { -- - ctx := user.InjectOrgID(context.Background(), ""1"") - - tests := []struct { -@@ -113,18 +112,22 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { - StartTs: start, - EndTs: end, - }, false}, -- {""labels"", func() (*http.Request, error) { -- return http.NewRequest(http.MethodGet, -- fmt.Sprintf(`/label?start=%d&end=%d&query={foo=""bar""}`, start.UnixNano(), end.UnixNano()), nil) -- }, NewLabelRequest(start, end, `{foo=""bar""}`, """", ""/label""), -- false}, -- {""label_values"", func() (*http.Request, error) { -- req, err := http.NewRequest(http.MethodGet, -- fmt.Sprintf(`/label/test/values?start=%d&end=%d&query={foo=""bar""}`, start.UnixNano(), end.UnixNano()), nil) -- req = mux.SetURLVars(req, map[string]string{""name"": ""test""}) -- return req, err -- }, NewLabelRequest(start, end, `{foo=""bar""}`, ""test"", ""/label/test/values""), -- false}, -+ { -+ ""labels"", func() (*http.Request, error) { -+ return http.NewRequest(http.MethodGet, -+ fmt.Sprintf(`/loki/api/v1/labels?start=%d&end=%d&query={foo=""bar""}`, start.UnixNano(), end.UnixNano()), nil) -+ }, NewLabelRequest(start, end, `{foo=""bar""}`, """", ""/loki/api/v1/labels""), -+ false, -+ }, -+ { -+ ""label_values"", func() (*http.Request, error) { -+ req, err := http.NewRequest(http.MethodGet, -+ fmt.Sprintf(`/loki/api/v1/label/test/values?start=%d&end=%d&query={foo=""bar""}`, start.UnixNano(), end.UnixNano()), nil) -+ req = mux.SetURLVars(req, map[string]string{""name"": ""test""}) -+ return 
req, err -+ }, NewLabelRequest(start, end, `{foo=""bar""}`, ""test"", ""/loki/api/v1/label/test/values""), -+ false, -+ }, - {""index_stats"", func() (*http.Request, error) { - return DefaultCodec.EncodeRequest(ctx, &logproto.IndexStatsRequest{ - From: model.TimeFromUnixNano(start.UnixNano()), -@@ -205,26 +208,57 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { - {""detected_fields"", func() (*http.Request, error) { - return DefaultCodec.EncodeRequest(ctx, &DetectedFieldsRequest{ - logproto.DetectedFieldsRequest{ -- Query: `{foo=""bar""}`, -- Start: start, -- End: end, -- Step: 30 * 1e3, // step is expected in ms; default is 0 or no step -- LineLimit: 100, -- FieldLimit: 100, -+ Query: `{foo=""bar""}`, -+ Start: start, -+ End: end, -+ Step: 30 * 1e3, // step is expected in ms; default is 0 or no step -+ LineLimit: 100, -+ Limit: 100, - }, - ""/loki/api/v1/detected_fields"", - }) - }, &DetectedFieldsRequest{ - logproto.DetectedFieldsRequest{ -- Query: `{foo=""bar""}`, -- Start: start, -- End: end, -- Step: 30 * 1e3, // step is expected in ms; default is 0 or no step -- LineLimit: 100, -- FieldLimit: 100, -+ Query: `{foo=""bar""}`, -+ Start: start, -+ End: end, -+ Step: 30 * 1e3, // step is expected in ms; default is 0 or no step -+ LineLimit: 100, -+ Limit: 100, - }, - ""/loki/api/v1/detected_fields"", - }, false}, -+ {""detected field values"", func() (*http.Request, error) { -+ req, err := DefaultCodec.EncodeRequest(ctx, &DetectedFieldsRequest{ -+ logproto.DetectedFieldsRequest{ -+ Query: `{baz=""bar""}`, -+ Start: start, -+ End: end, -+ Step: 30 * 1e3, // step is expected in ms; default is 0 or no step -+ LineLimit: 100, -+ Limit: 100, -+ }, -+ ""/loki/api/v1/detected_field/foo/values"", -+ }) -+ if err != nil { -+ return nil, err -+ } -+ -+ req = mux.SetURLVars(req, map[string]string{""name"": ""foo""}) -+ return req, nil -+ }, &DetectedFieldsRequest{ -+ logproto.DetectedFieldsRequest{ -+ Query: `{baz=""bar""}`, -+ Start: start, -+ End: end, -+ Step: 30 * 1e3, // step is expected in ms; default is 0 or no step -+ LineLimit: 100, -+ Limit: 100, -+ Values: true, -+ Name: ""foo"", -+ }, -+ ""/loki/api/v1/detected_field/foo/values"", -+ }, false}, - {""patterns"", func() (*http.Request, error) { - return DefaultCodec.EncodeRequest(ctx, &logproto.QueryPatternsRequest{ - Start: start, -@@ -586,7 +620,6 @@ func TestLokiSeriesRequestSpanLogging(t *testing.T) { - } - if field.Key == ""end"" { - require.Equal(t, timestamp.Time(end.UnixMilli()).String(), field.ValueString) -- - } - } - } -@@ -619,7 +652,7 @@ func TestLabelRequestSpanLogging(t *testing.T) { - - func Test_codec_DecodeProtobufResponseParity(t *testing.T) { - // test fixtures from pkg/util/marshal_test -- var queryTests = []struct { -+ queryTests := []struct { - name string - actual parser.Value - expected string -@@ -1049,7 +1082,6 @@ func Test_codec_seriesVolume_EncodeRequest(t *testing.T) { - func Test_codec_seriesVolume_DecodeRequest(t *testing.T) { - ctx := user.InjectOrgID(context.Background(), ""1"") - t.Run(""instant queries set a step of 0"", func(t *testing.T) { -- - req := httptest.NewRequest(http.MethodGet, ""/loki/api/v1/index/volume""+ - ""?start=0""+ - ""&end=1""+ -@@ -1106,7 +1138,8 @@ func Test_codec_EncodeResponse(t *testing.T) { - }, - }, - Statistics: statsResult, -- }, matrixString, false, nil}, -+ }, matrixString, false, nil, -+ }, - { - ""loki v1"", ""/loki/api/v1/query_range"", - &LokiResponse{ -@@ -1774,7 +1807,7 @@ func Test_codec_MergeResponse_DetectedFieldsResponse(t *testing.T) { - Fields: 
[]*logproto.DetectedField{ - buildDetctedField(""foo"", 1), - }, -- FieldLimit: 2, -+ Limit: 2, - }, - }, - &DetectedFieldsResponse{ -@@ -1782,7 +1815,7 @@ func Test_codec_MergeResponse_DetectedFieldsResponse(t *testing.T) { - Fields: []*logproto.DetectedField{ - buildDetctedField(""foo"", 3), - }, -- FieldLimit: 2, -+ Limit: 2, - }, - }, - } -@@ -1806,7 +1839,7 @@ func Test_codec_MergeResponse_DetectedFieldsResponse(t *testing.T) { - buildDetctedField(""foo"", 1), - buildDetctedField(""bar"", 42), - }, -- FieldLimit: 2, -+ Limit: 2, - }, - }, - &DetectedFieldsResponse{ -@@ -1815,7 +1848,7 @@ func Test_codec_MergeResponse_DetectedFieldsResponse(t *testing.T) { - buildDetctedField(""foo"", 27), - buildDetctedField(""baz"", 3), - }, -- FieldLimit: 2, -+ Limit: 2, - }, - }, - } -@@ -1842,7 +1875,6 @@ func Test_codec_MergeResponse_DetectedFieldsResponse(t *testing.T) { - - require.Nil(t, baz) - }) -- - } - - type badResponse struct{} -@@ -2593,7 +2625,6 @@ func Benchmark_CodecDecodeSeries(b *testing.B) { - } - }) - } -- - } - - func Benchmark_MergeResponses(b *testing.B) { -diff --git a/pkg/querier/queryrange/detected_fields.go b/pkg/querier/queryrange/detected_fields.go -index 115ba9601573c..3248d3b2cda81 100644 ---- a/pkg/querier/queryrange/detected_fields.go -+++ b/pkg/querier/queryrange/detected_fields.go -@@ -48,40 +48,86 @@ func NewDetectedFieldsHandler( - return resp, nil - } - -- detectedFields := parseDetectedFields(r.FieldLimit, re.Data.Result) -- fields := make([]*logproto.DetectedField, len(detectedFields)) -- fieldCount := 0 -- for k, v := range detectedFields { -- p := v.parsers -- if len(p) == 0 { -- p = nil -- } -- fields[fieldCount] = &logproto.DetectedField{ -- Label: k, -- Type: v.fieldType, -- Cardinality: v.Estimate(), -- Parsers: p, -- } -+ var fields []*logproto.DetectedField -+ var values []string -+ -+ if r.Values && r.Name != """" { -+ values = parseDetectedFieldValues(r.Limit, re.Data.Result, r.Name) -+ } else { -+ detectedFields := parseDetectedFields(r.Limit, re.Data.Result) -+ fields = make([]*logproto.DetectedField, len(detectedFields)) -+ fieldCount := 0 -+ for k, v := range detectedFields { -+ p := v.parsers -+ if len(p) == 0 { -+ p = nil -+ } -+ fields[fieldCount] = &logproto.DetectedField{ -+ Label: k, -+ Type: v.fieldType, -+ Cardinality: v.Estimate(), -+ Parsers: p, -+ } - -- fieldCount++ -+ fieldCount++ -+ } - } - - dfResp := DetectedFieldsResponse{ - Response: &logproto.DetectedFieldsResponse{ - Fields: fields, -+ Values: values, - }, - Headers: re.Headers, - } - - // Otherwise all they get is the field limit, which is a bit confusing -- if len(fields) > 0 { -- dfResp.Response.FieldLimit = r.GetFieldLimit() -+ if len(fields) > 0 || len(values) > 0 { -+ dfResp.Response.Limit = r.GetLimit() - } - - return &dfResp, nil - }) - } - -+func parseDetectedFieldValues(limit uint32, streams []push.Stream, name string) []string { -+ values := map[string]struct{}{} -+ for _, stream := range streams { -+ streamLbls, err := syntax.ParseLabels(stream.Labels) -+ if err != nil { -+ streamLbls = labels.EmptyLabels() -+ } -+ -+ for _, entry := range stream.Entries { -+ if len(values) >= int(limit) { -+ break -+ } -+ -+ structuredMetadata := getStructuredMetadata(entry) -+ if vals, ok := structuredMetadata[name]; ok { -+ for _, v := range vals { -+ values[v] = struct{}{} -+ } -+ } -+ -+ entryLbls := logql_log.NewBaseLabelsBuilder().ForLabels(streamLbls, streamLbls.Hash()) -+ parsedLabels, _ := parseEntry(entry, entryLbls) -+ if vals, ok := parsedLabels[name]; ok { -+ for _, v 
:= range vals { -+ values[v] = struct{}{} -+ } -+ } -+ } -+ } -+ -+ response := make([]string, 0, len(values)) -+ for v := range values { -+ response = append(response, v) -+ } -+ -+ return response -+} -+ - func makeDownstreamRequest( - ctx context.Context, - limits Limits, -diff --git a/pkg/querier/queryrange/detected_fields_test.go b/pkg/querier/queryrange/detected_fields_test.go -index 654a42ac8d00a..b0b363e4735d1 100644 ---- a/pkg/querier/queryrange/detected_fields_test.go -+++ b/pkg/querier/queryrange/detected_fields_test.go -@@ -4,6 +4,7 @@ import ( - ""context"" - ""fmt"" - ""math"" -+ ""slices"" - ""testing"" - ""time"" - -@@ -15,7 +16,6 @@ import ( - - ""github.com/grafana/loki/v3/pkg/loghttp"" - ""github.com/grafana/loki/v3/pkg/logproto"" -- ""github.com/grafana/loki/v3/pkg/logql/log"" - logql_log ""github.com/grafana/loki/v3/pkg/logql/log"" - ""github.com/grafana/loki/v3/pkg/logql/syntax"" - ""github.com/grafana/loki/v3/pkg/logqlmodel"" -@@ -354,7 +354,10 @@ func Test_parseDetectedFeilds(t *testing.T) { - duration = field - } - -- streamLbls.Add(log.ParsedLabel, labels.Label{Name: field.Name, Value: field.Value}) -+ streamLbls.Add( -+ logql_log.ParsedLabel, -+ labels.Label{Name: field.Name, Value: field.Value}, -+ ) - } - - rulerStreams = append(rulerStreams, push.Stream{ -@@ -495,7 +498,7 @@ func Test_parseDetectedFeilds(t *testing.T) { - } - - nginxStreamLbls.Add( -- log.ParsedLabel, -+ logql_log.ParsedLabel, - labels.Label{Name: field.Name, Value: field.Value}, - ) - } -@@ -958,6 +961,38 @@ func mockLogfmtStreamWithLabelsAndStructuredMetadata( - } - } - -+func limitedHandler(stream logproto.Stream) base.Handler { -+ return base.HandlerFunc( -+ func(_ context.Context, _ base.Request) (base.Response, error) { -+ return &LokiResponse{ -+ Status: ""success"", -+ Data: LokiData{ -+ ResultType: loghttp.ResultTypeStream, -+ Result: []logproto.Stream{ -+ stream, -+ }, -+ }, -+ Direction: logproto.BACKWARD, -+ }, nil -+ }) -+} -+ -+func logHandler(stream logproto.Stream) base.Handler { -+ return base.HandlerFunc( -+ func(_ context.Context, _ base.Request) (base.Response, error) { -+ return &LokiResponse{ -+ Status: ""success"", -+ Data: LokiData{ -+ ResultType: loghttp.ResultTypeStream, -+ Result: []logproto.Stream{ -+ stream, -+ }, -+ }, -+ Direction: logproto.BACKWARD, -+ }, nil -+ }) -+} -+ - func TestQuerier_DetectedFields(t *testing.T) { - limits := fakeLimits{ - maxSeries: math.MaxInt32, -@@ -967,50 +1002,18 @@ func TestQuerier_DetectedFields(t *testing.T) { - maxQuerierBytesRead: 100, - } - -- limitedHandler := func(stream logproto.Stream) base.Handler { -- return base.HandlerFunc( -- func(_ context.Context, _ base.Request) (base.Response, error) { -- return &LokiResponse{ -- Status: ""success"", -- Data: LokiData{ -- ResultType: loghttp.ResultTypeStream, -- Result: []logproto.Stream{ -- stream, -- }, -- }, -- Direction: logproto.BACKWARD, -- }, nil -- }) -- } -- -- logHandler := func(stream logproto.Stream) base.Handler { -- return base.HandlerFunc( -- func(_ context.Context, _ base.Request) (base.Response, error) { -- return &LokiResponse{ -- Status: ""success"", -- Data: LokiData{ -- ResultType: loghttp.ResultTypeStream, -- Result: []logproto.Stream{ -- stream, -- }, -- }, -- Direction: logproto.BACKWARD, -- }, nil -- }) -- } -- - request := DetectedFieldsRequest{ - logproto.DetectedFieldsRequest{ -- Start: time.Now().Add(-1 * time.Minute), -- End: time.Now(), -- Query: `{type=""test""} | logfmt | json`, -- LineLimit: 1000, -- FieldLimit: 1000, -+ Start: time.Now().Add(-1 * 
time.Minute), -+ End: time.Now(), -+ Query: `{type=""test""} | logfmt | json`, -+ LineLimit: 1000, -+ Limit: 1000, - }, - ""/loki/api/v1/detected_fields"", - } - -- handleRequest := func(handler base.Handler, request DetectedFieldsRequest) []*logproto.DetectedField { -+ handleRequest := func(handler base.Handler, request DetectedFieldsRequest) *logproto.DetectedFieldsResponse { - ctx := context.Background() - ctx = user.InjectOrgID(ctx, ""test-tenant"") - -@@ -1020,7 +1023,7 @@ func TestQuerier_DetectedFields(t *testing.T) { - r, ok := resp.(*DetectedFieldsResponse) - require.True(t, ok) - -- return r.Response.Fields -+ return r.Response - } - - t.Run(""returns detected fields from queried logs"", func(t *testing.T) { -@@ -1030,7 +1033,7 @@ func TestQuerier_DetectedFields(t *testing.T) { - limits, - ) - -- detectedFields := handleRequest(handler, request) -+ detectedFields := handleRequest(handler, request).Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t - assert.Len(t, detectedFields, 8) -@@ -1052,12 +1055,16 @@ func TestQuerier_DetectedFields(t *testing.T) { - - t.Run(""returns detected fields with structured metadata from queried logs"", func(t *testing.T) { - handler := NewDetectedFieldsHandler( -- limitedHandler(mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`)), -- logHandler(mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`)), -+ limitedHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`), -+ ), -+ logHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`), -+ ), - limits, - ) - -- detectedFields := handleRequest(handler, request) -+ detectedFields := handleRequest(handler, request).Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t - assert.Len(t, detectedFields, 10) -@@ -1086,7 +1093,7 @@ func TestQuerier_DetectedFields(t *testing.T) { - limits, - ) - -- detectedFields := handleRequest(handler, request) -+ detectedFields := handleRequest(handler, request).Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t - assert.Len(t, detectedFields, 8) -@@ -1129,7 +1136,7 @@ func TestQuerier_DetectedFields(t *testing.T) { - limits, - ) - -- detectedFields := handleRequest(handler, request) -+ detectedFields := handleRequest(handler, request).Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t - assert.Len(t, detectedFields, 10) -@@ -1172,13 +1179,23 @@ func TestQuerier_DetectedFields(t *testing.T) { - func(t *testing.T) { - handler := NewDetectedFieldsHandler( - limitedHandler( -- mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 2, `{type=""test"", name=""bob""}`), -+ mockLogfmtStreamWithLabelsAndStructuredMetadata( -+ 1, -+ 2, -+ `{type=""test"", name=""bob""}`, -+ ), -+ ), -+ logHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata( -+ 1, -+ 2, -+ `{type=""test"", name=""bob""}`, -+ ), - ), -- logHandler(mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 2, `{type=""test"", name=""bob""}`)), - limits, - ) - -- detectedFields := handleRequest(handler, request) -+ detectedFields := handleRequest(handler, request).Fields - // log lines come from querier_mock_test.go - // 
message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t - assert.Len(t, detectedFields, 10) -@@ -1198,48 +1215,148 @@ func TestQuerier_DetectedFields(t *testing.T) { - assert.Equal(t, uint64(1), nameField.Cardinality) - }, - ) -+ -+ t.Run(""returns values for a detected fields"", func(t *testing.T) { -+ handler := NewDetectedFieldsHandler( -+ limitedHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`), -+ ), -+ logHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`), -+ ), -+ limits, -+ ) -+ -+ request := DetectedFieldsRequest{ -+ logproto.DetectedFieldsRequest{ -+ Start: time.Now().Add(-1 * time.Minute), -+ End: time.Now(), -+ Query: `{type=""test""} | logfmt | json`, -+ LineLimit: 1000, -+ Limit: 1000, -+ Values: true, -+ Name: ""message"", -+ }, -+ ""/loki/api/v1/detected_field/message/values"", -+ } -+ -+ detectedFieldValues := handleRequest(handler, request).Values -+ // log lines come from querier_mock_test.go -+ // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t -+ assert.Len(t, detectedFieldValues, 5) -+ -+ slices.Sort(detectedFieldValues) -+ assert.Equal(t, []string{ -+ ""line 1"", -+ ""line 2"", -+ ""line 3"", -+ ""line 4"", -+ ""line 5"", -+ }, detectedFieldValues) -+ }) -+ -+ t.Run( -+ ""returns values for a detected fields, enforcing the limit and removing duplicates"", -+ func(t *testing.T) { -+ handler := NewDetectedFieldsHandler( -+ limitedHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata( -+ 1, -+ 5, -+ `{type=""test""}`, -+ ), -+ ), -+ logHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata( -+ 1, -+ 5, -+ `{type=""test""}`, -+ ), -+ ), -+ limits, -+ ) -+ -+ request := DetectedFieldsRequest{ -+ logproto.DetectedFieldsRequest{ -+ Start: time.Now().Add(-1 * time.Minute), -+ End: time.Now(), -+ Query: `{type=""test""} | logfmt | json`, -+ LineLimit: 1000, -+ Limit: 3, -+ Values: true, -+ Name: ""message"", -+ }, -+ ""/loki/api/v1/detected_field/message/values"", -+ } -+ -+ detectedFieldValues := handleRequest(handler, request).Values -+ // log lines come from querier_mock_test.go -+ // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t -+ assert.Len(t, detectedFieldValues, 3) -+ -+ request = DetectedFieldsRequest{ -+ logproto.DetectedFieldsRequest{ -+ Start: time.Now().Add(-1 * time.Minute), -+ End: time.Now(), -+ Query: `{type=""test""} | logfmt | json`, -+ LineLimit: 1000, -+ Limit: 3, -+ Values: true, -+ Name: ""name"", -+ }, -+ ""/loki/api/v1/detected_field/name/values"", -+ } -+ -+ secondValues := handleRequest(handler, request).Values -+ // log lines come from querier_mock_test.go -+ // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t name=bar -+ assert.Len(t, secondValues, 1) -+ -+ assert.Equal(t, []string{ -+ ""bar"", -+ }, secondValues) -+ }, -+ ) - } - --// func BenchmarkQuerierDetectedFields(b *testing.B) { --// limits, _ := validation.NewOverrides(defaultLimitsTestConfig(), nil) --// ctx := user.InjectOrgID(context.Background(), ""test"") -- --// conf := mockQuerierConfig() --// conf.IngesterQueryStoreMaxLookback = 0 -- --// request := logproto.DetectedFieldsRequest{ --// Start: time.Now().Add(-1 * time.Minute), --// End: time.Now(), --// Query: `{type=""test""}`, --// LineLimit: 1000, --// FieldLimit: 1000, --// } -- --// store := newStoreMock() --// store.On(""SelectLogs"", mock.Anything, mock.Anything). 
--// Return(mockLogfmtStreamIterator(1, 2), nil) -- --// queryClient := newQueryClientMock() --// queryClient.On(""Recv""). --// Return(mockQueryResponse([]logproto.Stream{mockLogfmtStream(1, 2)}), nil) -- --// ingesterClient := newQuerierClientMock() --// ingesterClient.On(""Query"", mock.Anything, mock.Anything, mock.Anything). --// Return(queryClient, nil) -- --// querier, _ := newQuerier( --// conf, --// mockIngesterClientConfig(), --// newIngesterClientMockFactory(ingesterClient), --// mockReadRingWithOneActiveIngester(), --// &mockDeleteGettter{}, --// store, limits) -- --// b.ReportAllocs() --// b.ResetTimer() -- --// for i := 0; i < b.N; i++ { --// _, err := querier.DetectedFields(ctx, &request) --// assert.NoError(b, err) --// } --// } -+func BenchmarkQuerierDetectedFields(b *testing.B) { -+ limits := fakeLimits{ -+ maxSeries: math.MaxInt32, -+ maxQueryParallelism: 1, -+ tsdbMaxQueryParallelism: 1, -+ maxQueryBytesRead: 1000, -+ maxQuerierBytesRead: 100, -+ } -+ -+ request := logproto.DetectedFieldsRequest{ -+ Start: time.Now().Add(-1 * time.Minute), -+ End: time.Now(), -+ Query: `{type=""test""}`, -+ LineLimit: 1000, -+ Limit: 1000, -+ } -+ -+ b.ReportAllocs() -+ b.ResetTimer() -+ -+ handler := NewDetectedFieldsHandler( -+ limitedHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`), -+ ), -+ logHandler( -+ mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type=""test"", name=""bob""}`), -+ ), -+ limits, -+ ) -+ -+ for i := 0; i < b.N; i++ { -+ ctx := context.Background() -+ ctx = user.InjectOrgID(ctx, ""test-tenant"") -+ -+ resp, err := handler.Do(ctx, &request) -+ assert.NoError(b, err) -+ -+ _, ok := resp.(*DetectedFieldsResponse) -+ require.True(b, ok) -+ } -+} -diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go -index 4b9f3dbca9da8..af0d57f1c0c62 100644 ---- a/pkg/querier/queryrange/roundtrip.go -+++ b/pkg/querier/queryrange/roundtrip.go -@@ -445,7 +445,7 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, - ""msg"", ""executing query"", - ""type"", ""detected_fields"", - ""end"", op.End, -- ""field_limit"", op.FieldLimit, -+ ""field_limit"", op.Limit, - ""length"", op.End.Sub(op.Start), - ""line_limit"", op.LineLimit, - ""query"", op.Query, -@@ -508,7 +508,7 @@ func getOperation(path string) string { - return QueryRangeOp - case strings.HasSuffix(path, ""/series""): - return SeriesOp -- case strings.HasSuffix(path, ""/labels"") || strings.HasSuffix(path, ""/label"") || strings.HasSuffix(path, ""/values""): -+ case strings.HasSuffix(path, ""/labels"") || strings.HasSuffix(path, ""/label""): - return LabelNamesOp - case strings.HasSuffix(path, ""/v1/query""): - return InstantQueryOp -@@ -521,6 +521,12 @@ func getOperation(path string) string { - case path == ""/loki/api/v1/index/shards"": - return IndexShardsOp - case path == ""/loki/api/v1/detected_fields"": -+ return DetectedFieldsOp -+ case strings.HasSuffix(path, ""/values""): -+ if strings.HasPrefix(path, ""/loki/api/v1/label"") || strings.HasPrefix(path, ""/prom/label"") { -+ return LabelNamesOp -+ } -+ - return DetectedFieldsOp - case path == ""/loki/api/v1/patterns"": - return PatternsQueryOp -diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go -index 2f3b5fcd92ee3..a899d64c21be0 100644 ---- a/pkg/querier/queryrange/roundtrip_test.go -+++ b/pkg/querier/queryrange/roundtrip_test.go -@@ -360,7 +360,7 @@ func TestInstantQueryTripperwareResultCaching(t 
*testing.T) { - testLocal.CacheResults = false - testLocal.CacheIndexStatsResults = false - testLocal.CacheInstantMetricResults = false -- var l = fakeLimits{ -+ l := fakeLimits{ - maxQueryParallelism: 1, - tsdbMaxQueryParallelism: 1, - maxQueryBytesRead: 1000, -@@ -1043,7 +1043,6 @@ func TestTripperware_EntriesLimit(t *testing.T) { - } - - func TestTripperware_RequiredLabels(t *testing.T) { -- - const noErr = """" - - for _, test := range []struct { -@@ -1095,7 +1094,6 @@ func TestTripperware_RequiredLabels(t *testing.T) { - } - - func TestTripperware_RequiredNumberLabels(t *testing.T) { -- - const noErr = """" - - for _, tc := range []struct { -@@ -1263,6 +1261,16 @@ func Test_getOperation(t *testing.T) { - path: ""/prom/label/__name__/values"", - expectedOp: LabelNamesOp, - }, -+ { -+ name: ""detected_fields"", -+ path: ""/loki/api/v1/detected_fields"", -+ expectedOp: DetectedFieldsOp, -+ }, -+ { -+ name: ""detected_fields_values"", -+ path: ""/loki/api/v1/detected_field/foo/values"", -+ expectedOp: DetectedFieldsOp, -+ }, - } - - for _, tc := range cases { -@@ -1523,6 +1531,7 @@ func (f fakeLimits) VolumeEnabled(_ string) bool { - func (f fakeLimits) TSDBMaxBytesPerShard(_ string) int { - return valid.DefaultTSDBMaxBytesPerShard - } -+ - func (f fakeLimits) TSDBShardingStrategy(string) string { - return logql.PowerOfTwoVersion.String() - } -diff --git a/pkg/querier/queryrange/splitters.go b/pkg/querier/queryrange/splitters.go -index fe3453b2ee717..06f4f903eb6b1 100644 ---- a/pkg/querier/queryrange/splitters.go -+++ b/pkg/querier/queryrange/splitters.go -@@ -103,12 +103,12 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer - factory = func(start, end time.Time) { - reqs = append(reqs, &DetectedFieldsRequest{ - DetectedFieldsRequest: logproto.DetectedFieldsRequest{ -- Start: start, -- End: end, -- Query: r.GetQuery(), -- LineLimit: r.GetLineLimit(), -- FieldLimit: r.GetFieldLimit(), -- Step: r.GetStep(), -+ Start: start, -+ End: end, -+ Query: r.GetQuery(), -+ LineLimit: r.GetLineLimit(), -+ Limit: r.GetLimit(), -+ Step: r.GetStep(), - }, - path: r.path, - }) -diff --git a/pkg/storage/detected/fields.go b/pkg/storage/detected/fields.go -index 8dd9dd1a15126..4d96e2baea3f2 100644 ---- a/pkg/storage/detected/fields.go -+++ b/pkg/storage/detected/fields.go -@@ -53,9 +53,9 @@ func (f *UnmarshaledDetectedField) Merge(df *logproto.DetectedField) error { - - func MergeFields( - fields []*logproto.DetectedField, -- fieldLimit uint32, -+ limit uint32, - ) ([]*logproto.DetectedField, error) { -- mergedFields := make(map[string]*UnmarshaledDetectedField, fieldLimit) -+ mergedFields := make(map[string]*UnmarshaledDetectedField, limit) - foundFields := uint32(0) - - for _, field := range fields { -@@ -66,7 +66,7 @@ func MergeFields( - // TODO(twhitney): this will take the first N up to limit, is there a better - // way to rank the fields to make sure we get the most interesting ones? 
- f, ok := mergedFields[field.Label] -- if !ok && foundFields < fieldLimit { -+ if !ok && foundFields < limit { - unmarshaledField, err := UnmarshalDetectedField(field) - if err != nil { - return nil, err -@@ -86,7 +86,7 @@ func MergeFields( - } - } - -- result := make([]*logproto.DetectedField, 0, fieldLimit) -+ result := make([]*logproto.DetectedField, 0, limit) - for _, field := range mergedFields { - detectedField := &logproto.DetectedField{ - Label: field.Label, -@@ -100,3 +100,29 @@ func MergeFields( - - return result, nil - } -+ -+func MergeValues( -+ values []string, -+ limit uint32, -+) ([]string, error) { -+ mergedValues := make(map[string]struct{}, limit) -+ -+ for _, value := range values { -+ if value == """" { -+ continue -+ } -+ -+ if len(mergedValues) >= int(limit) { -+ break -+ } -+ -+ mergedValues[value] = struct{}{} -+ } -+ -+ result := make([]string, 0, limit) -+ for value := range mergedValues { -+ result = append(result, value) -+ } -+ -+ return result, nil -+} -diff --git a/pkg/storage/detected/fields_test.go b/pkg/storage/detected/fields_test.go -index 2c31fa1d2775e..9d0bff10d4376 100644 ---- a/pkg/storage/detected/fields_test.go -+++ b/pkg/storage/detected/fields_test.go -@@ -113,3 +113,35 @@ func Test_MergeFields(t *testing.T) { - require.Error(t, err) - }) - } -+ -+func Test_MergeValues(t *testing.T) { -+ t.Run(""merges different values"", func(t *testing.T) { -+ values := []string{""foo"", ""bar"", ""baz"", ""qux""} -+ limit := uint32(50) -+ -+ result, err := MergeValues(values, limit) -+ require.NoError(t, err) -+ assert.Equal(t, 4, len(result)) -+ assert.ElementsMatch(t, []string{""foo"", ""bar"", ""baz"", ""qux""}, result) -+ }) -+ -+ t.Run(""merges repeating values"", func(t *testing.T) { -+ values := []string{""foo"", ""bar"", ""baz"", ""qux"", ""foo"", ""bar"", ""baz"", ""qux""} -+ limit := uint32(50) -+ -+ result, err := MergeValues(values, limit) -+ require.NoError(t, err) -+ assert.Equal(t, 4, len(result)) -+ assert.ElementsMatch(t, []string{""foo"", ""bar"", ""baz"", ""qux""}, result) -+ }) -+ -+ t.Run(""enforces the limit"", func(t *testing.T) { -+ values := []string{""foo"", ""bar"", ""baz"", ""qux"", ""foo"", ""bar"", ""baz"", ""qux""} -+ limit := uint32(2) -+ -+ result, err := MergeValues(values, limit) -+ require.NoError(t, err) -+ assert.Equal(t, 2, len(result)) -+ assert.ElementsMatch(t, []string{""foo"", ""bar""}, result) -+ }) -+} -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index ab62323c95f86..9640891480513 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -1196,6 +1196,7 @@ ingress: - - /loki/api/v1/index/volume - - /loki/api/v1/index/volume_range - - /loki/api/v1/format_query -+ - /loki/api/v1/detected_field - - /loki/api/v1/detected_fields - - /loki/api/v1/detected_labels - - /loki/api/v1/patterns",feat,detected field values (#14350) -c762b9b5d3877e7cbfc41d8ab9a1a4287ebe97b2,2024-10-28 10:32:40,Joao Marcal,fix(mixins): retention dashboards fix metric name (#14617),False,"diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json -index cec461bff5117..316a43298b300 100644 ---- a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json -+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json -@@ -495,7 +495,7 @@ - ""span"": 6, - ""targets"": [ - { -- ""expr"": ""sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\""$cluster\"", 
namespace=~\""$namespace\""}[$__range]))"", -+ ""expr"": ""sum(loki_compactor_locked_table_successive_compaction_skips{cluster=~\""$cluster\"", namespace=~\""$namespace\""})"", - ""format"": ""time_series"", - ""legendFormat"": ""{{table_name}}"", - ""legendLink"": null -diff --git a/production/loki-mixin-compiled/dashboards/loki-retention.json b/production/loki-mixin-compiled/dashboards/loki-retention.json -index 70c5171d9c391..ac719445d2458 100644 ---- a/production/loki-mixin-compiled/dashboards/loki-retention.json -+++ b/production/loki-mixin-compiled/dashboards/loki-retention.json -@@ -495,7 +495,7 @@ - ""span"": 6, - ""targets"": [ - { -- ""expr"": ""sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\""$cluster\"", namespace=~\""$namespace\""}[$__range]))"", -+ ""expr"": ""sum(loki_compactor_locked_table_successive_compaction_skips{cluster=~\""$cluster\"", namespace=~\""$namespace\""})"", - ""format"": ""time_series"", - ""legendFormat"": ""{{table_name}}"", - ""legendLink"": null -diff --git a/production/loki-mixin/dashboards/loki-retention.libsonnet b/production/loki-mixin/dashboards/loki-retention.libsonnet -index 2a1c4777a2930..faee87a52ccc6 100644 ---- a/production/loki-mixin/dashboards/loki-retention.libsonnet -+++ b/production/loki-mixin/dashboards/loki-retention.libsonnet -@@ -40,7 +40,7 @@ - $.row('') - .addPanel( - $.newQueryPanel('Number of times Tables were skipped during Compaction') + -- $.queryPanel(['sum(increase(loki_compactor_skipped_compacting_locked_table_total{%s}[$__range]))' % $.namespaceMatcher()], ['{{table_name}}']), -+ $.queryPanel(['sum(loki_compactor_locked_table_successive_compaction_skips{%s})' % $.namespaceMatcher()], ['{{table_name}}']), - ) - .addPanel( - $.newQueryPanel('Compact Tables Operations Per Status') +",fix,retention dashboards fix metric name (#14617) -19f5f3fd443dc8759ecdd24e0d0d563889adb235,2024-03-13 14:45:36,Cyril Tovena,fix: Fixes compactor shutdown log (#12195),False,"diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go -index 8e3fa52126929..f5062f2d6e33e 100644 ---- a/pkg/compactor/compactor.go -+++ b/pkg/compactor/compactor.go -@@ -512,7 +512,7 @@ func (c *Compactor) runCompactions(ctx context.Context) { - - // do the initial compaction - if err := c.RunCompaction(ctx, false); err != nil { -- level.Error(util_log.Logger).Log(""msg"", ""failed to run compaction"", err) -+ level.Error(util_log.Logger).Log(""msg"", ""failed to run compaction"", ""err"", err) - } - - c.wg.Add(1) -@@ -526,7 +526,7 @@ func (c *Compactor) runCompactions(ctx context.Context) { - select { - case <-ticker.C: - if err := c.RunCompaction(ctx, false); err != nil { -- level.Error(util_log.Logger).Log(""msg"", ""failed to run compaction"", err) -+ level.Error(util_log.Logger).Log(""msg"", ""failed to run compaction"", ""err"", err) - } - case <-ctx.Done(): - return -@@ -539,7 +539,7 @@ func (c *Compactor) runCompactions(ctx context.Context) { - go func() { - defer c.wg.Done() - if err := c.RunCompaction(ctx, true); err != nil { -- level.Error(util_log.Logger).Log(""msg"", ""failed to apply retention"", err) -+ level.Error(util_log.Logger).Log(""msg"", ""failed to apply retention"", ""err"", err) - } - - ticker := time.NewTicker(c.cfg.ApplyRetentionInterval) -@@ -549,7 +549,7 @@ func (c *Compactor) runCompactions(ctx context.Context) { - select { - case <-ticker.C: - if err := c.RunCompaction(ctx, true); err != nil { -- level.Error(util_log.Logger).Log(""msg"", ""failed to apply retention"", err) -+ 
level.Error(util_log.Logger).Log(""msg"", ""failed to apply retention"", ""err"", err) - } - case <-ctx.Done(): - return -@@ -876,7 +876,6 @@ func SortTablesByRange(tables []string) { - // less than if start time is after produces a most recent first sort order - return tableRanges[tables[i]].Start.After(tableRanges[tables[j]].Start) - }) -- - } - - func SchemaPeriodForTable(cfg config.SchemaConfig, tableName string) (config.PeriodConfig, bool) {",fix,Fixes compactor shutdown log (#12195) -42c43ecb4213f0a83e016afe5b8a89cd36757aa2,2025-01-10 22:13:27,renovate[bot],"fix(deps): update module github.com/aws/aws-sdk-go-v2 to v1.32.8 (#15680) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod -index 0ad8d1b4afa40..b57ce4ee38b2c 100644 ---- a/tools/lambda-promtail/go.mod -+++ b/tools/lambda-promtail/go.mod -@@ -4,7 +4,7 @@ go 1.22 - - require ( - github.com/aws/aws-lambda-go v1.47.0 -- github.com/aws/aws-sdk-go-v2 v1.32.7 -+ github.com/aws/aws-sdk-go-v2 v1.32.8 - github.com/aws/aws-sdk-go-v2/config v1.28.7 - github.com/aws/aws-sdk-go-v2/service/s3 v1.72.1 - github.com/go-kit/log v0.2.1 -diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum -index af98383f80ce6..2e5701e7a718c 100644 ---- a/tools/lambda-promtail/go.sum -+++ b/tools/lambda-promtail/go.sum -@@ -48,8 +48,8 @@ github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1s - github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= - github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= - github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= --github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= --github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -+github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo= -+github.com/aws/aws-sdk-go-v2 v1.32.8/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= - github.com/aws/aws-sdk-go-v2/config v1.28.7 h1:GduUnoTXlhkgnxTD93g1nv4tVPILbdNQOzav+Wpg7AE=",fix,"update module github.com/aws/aws-sdk-go-v2 to v1.32.8 (#15680) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -7bbd8b5087d637ac592403c5daafda35353fe13d,2024-05-02 01:49:09,Owen Diehl,feat(blooms): ingester aware bounded impl (#12840),False,"diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go -index 8b135602b7f07..89ad4e00a79c0 100644 ---- a/pkg/loghttp/query.go -+++ b/pkg/loghttp/query.go -@@ -537,6 +537,9 @@ func ParseIndexShardsQuery(r *http.Request) (*RangeQuery, datasize.ByteSize, err - return nil, 0, err - } - targetBytes, err := parseBytes(r, ""targetBytesPerShard"", true) -+ if targetBytes <= 0 { -+ return nil, 0, errors.New(""targetBytesPerShard must be a positive value"") -+ } - return parsed, targetBytes, err - } - -diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go -index 8d104d702b8bb..ed3c9dab6b422 100644 ---- a/pkg/storage/async_store.go -+++ b/pkg/storage/async_store.go -@@ -5,12 +5,15 @@ import ( - ""fmt"" - ""time"" - -+ ""github.com/c2h5oh/datasize"" - ""github.com/opentracing/opentracing-go"" - - 
""github.com/grafana/loki/v3/pkg/logproto"" - ""github.com/grafana/loki/v3/pkg/storage/stores"" - ""github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume"" -+ ""github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"" - -+ ""github.com/go-kit/log"" - ""github.com/go-kit/log/level"" - ""github.com/grafana/dskit/concurrency"" - ""github.com/prometheus/common/model"" -@@ -281,3 +284,105 @@ func filterDuplicateChunks(scfg config.SchemaConfig, storeChunks [][]chunk.Chunk - - return filteredChunkIDs - } -+ -+func (a *AsyncStore) GetShards( -+ ctx context.Context, -+ userID string, -+ from, through model.Time, -+ targetBytesPerShard uint64, -+ predicate chunk.Predicate, -+) (*logproto.ShardsResponse, error) { -+ logger := log.With( -+ util_log.WithContext(ctx, util_log.Logger), -+ ""component"", ""asyncStore"", -+ ) -+ -+ if !a.shouldQueryIngesters(through, model.Now()) { -+ return a.Store.GetShards(ctx, userID, from, through, targetBytesPerShard, predicate) -+ } -+ -+ var ( -+ shardResp *logproto.ShardsResponse -+ statsResp *stats.Stats -+ ) -+ -+ jobs := []func() error{ -+ func() error { -+ var err error -+ shardResp, err = a.Store.GetShards(ctx, userID, from, through, targetBytesPerShard, predicate) -+ return err -+ }, -+ // We can't dedupe shards by their contents, so we complement the -+ // store's response with the ingester's stats and . -+ func() error { -+ var err error -+ statsResp, err = a.ingesterQuerier.Stats(ctx, userID, from, through, predicate.Matchers...) -+ return err -+ }, -+ } -+ -+ if err := concurrency.ForEachJob( -+ ctx, -+ len(jobs), -+ len(jobs), -+ func(ctx context.Context, i int) error { -+ return jobs[i]() -+ }, -+ ); err != nil { -+ return nil, err -+ } -+ -+ return mergeShardsFromIngestersAndStore(logger, shardResp, statsResp, targetBytesPerShard), nil -+} -+ -+func mergeShardsFromIngestersAndStore( -+ logger log.Logger, -+ storeResp *logproto.ShardsResponse, -+ statsResp *logproto.IndexStatsResponse, -+ targetBytesPerShard uint64, -+) *logproto.ShardsResponse { -+ var storeBytes uint64 -+ for _, shard := range storeResp.Shards { -+ storeBytes += shard.Stats.Bytes -+ } -+ totalBytes := storeBytes + statsResp.Bytes -+ -+ defer func() { -+ level.Debug(logger).Log( -+ ""msg"", ""resolved shards "", -+ ""ingester_bytes"", datasize.ByteSize(statsResp.Bytes).HumanReadable(), -+ ""store_bytes"", datasize.ByteSize(storeBytes).HumanReadable(), -+ ""total_bytes"", datasize.ByteSize(totalBytes).HumanReadable(), -+ ""target_bytes"", datasize.ByteSize(targetBytesPerShard).HumanReadable(), -+ ""store_shards"", len(storeResp.Shards), -+ ) -+ }() -+ -+ // edge case to avoid divide by zero later -+ if totalBytes == 0 { -+ return &logproto.ShardsResponse{ -+ Shards: sharding.LinearShards(0, 0), -+ } -+ } -+ -+ // If the ingesters don't have enough data to meaningfuly -+ // change the number of shards, use the store response. 
-+ if pct := float64(statsResp.Bytes) / float64(totalBytes); pct < 0.25 { -+ return storeResp -+ } -+ -+ shards := sharding.LinearShards(int(totalBytes/targetBytesPerShard), totalBytes) -+ -+ // increment the total chunks by the number seen from ingesters -+ // NB(owen-d): this isn't perfect as it mixes signals a bit by joining -+ // store chunks which _could_ possibly be filtered with ingester chunks which can't, -+ // but it's still directionally helpful -+ updatedStats := storeResp.Statistics -+ updatedStats.Index.TotalChunks += int64(statsResp.Chunks) -+ return &logproto.ShardsResponse{ -+ Shards: shards, -+ Statistics: updatedStats, -+ // explicitly nil chunkgroups when we've changed the shards+included chunkrefs from ingesters -+ ChunkGroups: nil, -+ } -+} -diff --git a/pkg/storage/async_store_test.go b/pkg/storage/async_store_test.go -index a85b33ecccefd..9cf80868c861d 100644 ---- a/pkg/storage/async_store_test.go -+++ b/pkg/storage/async_store_test.go -@@ -5,7 +5,10 @@ import ( - ""testing"" - ""time"" - -+ ""github.com/go-kit/log"" -+ - ""github.com/grafana/loki/v3/pkg/logproto"" -+ ""github.com/grafana/loki/v3/pkg/logqlmodel/stats"" - - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/model/labels"" -@@ -15,6 +18,7 @@ import ( - ""github.com/grafana/loki/v3/pkg/storage/chunk"" - ""github.com/grafana/loki/v3/pkg/storage/chunk/fetcher"" - ""github.com/grafana/loki/v3/pkg/storage/config"" -+ ""github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"" - ""github.com/grafana/loki/v3/pkg/util"" - ) - -@@ -29,8 +33,8 @@ func newStoreMock() *storeMock { - return &storeMock{} - } - --func (s *storeMock) GetChunks(ctx context.Context, userID string, from, through model.Time, predicate chunk.Predicate, storeChunksOverride *logproto.ChunkRefGroup) ([][]chunk.Chunk, []*fetcher.Fetcher, error) { -- args := s.Called(ctx, userID, from, through, predicate, storeChunksOverride) -+func (s *storeMock) GetChunks(ctx context.Context, userID string, from, through model.Time, predicate chunk.Predicate, overrides *logproto.ChunkRefGroup) ([][]chunk.Chunk, []*fetcher.Fetcher, error) { -+ args := s.Called(ctx, userID, from, through, predicate, overrides) - return args.Get(0).([][]chunk.Chunk), args.Get(1).([]*fetcher.Fetcher), args.Error(2) - } - -@@ -360,3 +364,80 @@ func convertChunksToChunkIDs(s config.SchemaConfig, chunks []chunk.Chunk) []stri - - return chunkIDs - } -+ -+func TestMergeShardsFromIngestersAndStore(t *testing.T) { -+ mkStats := func(bytes, chks uint64) logproto.IndexStatsResponse { -+ return logproto.IndexStatsResponse{ -+ Bytes: bytes, -+ Chunks: chks, -+ } -+ } -+ -+ // creates n shards with bytesPerShard * n bytes and chks chunks -+ mkShards := func(n int, bytesPerShard uint64, chks int64) logproto.ShardsResponse { -+ return logproto.ShardsResponse{ -+ Shards: sharding.LinearShards(n, bytesPerShard*uint64(n)), -+ Statistics: stats.Result{ -+ Index: stats.Index{ -+ TotalChunks: chks, -+ }, -+ }, -+ } -+ } -+ -+ targetBytesPerShard := 10 -+ -+ for _, tc := range []struct { -+ desc string -+ ingester logproto.IndexStatsResponse -+ store logproto.ShardsResponse -+ exp logproto.ShardsResponse -+ }{ -+ { -+ desc: ""zero bytes returns one full shard"", -+ ingester: mkStats(0, 0), -+ store: mkShards(0, 0, 0), -+ exp: mkShards(1, 0, 0), -+ }, -+ { -+ desc: ""zero ingester bytes honors store"", -+ ingester: mkStats(0, 0), -+ store: mkShards(10, uint64(targetBytesPerShard), 10), -+ exp: mkShards(10, uint64(targetBytesPerShard), 10), -+ }, -+ { 
-+ desc: ""zero store bytes honors ingester"", -+ ingester: mkStats(uint64(targetBytesPerShard*10), 10), -+ store: mkShards(0, 0, 0), -+ exp: mkShards(10, uint64(targetBytesPerShard), 10), -+ }, -+ { -+ desc: ""ingester bytes below threshold ignored"", -+ ingester: mkStats(uint64(targetBytesPerShard*2), 10), // 2 shards worth from ingesters -+ store: mkShards(10, uint64(targetBytesPerShard), 10), // 10 shards worth from store -+ exp: mkShards(10, uint64(targetBytesPerShard), 10), // use the store's resp -+ }, -+ { -+ desc: ""ingester bytes above threshold recreate shards"", -+ ingester: mkStats(uint64(targetBytesPerShard*4), 10), // 4 shards worth from ingesters -+ store: mkShards(10, uint64(targetBytesPerShard), 10), // 10 shards worth from store -+ exp: mkShards(14, uint64(targetBytesPerShard), 20), // regenerate 14 shards -+ }, -+ } { -+ -+ t.Run(tc.desc, func(t *testing.T) { -+ got := mergeShardsFromIngestersAndStore( -+ log.NewNopLogger(), -+ &tc.store, -+ &tc.ingester, -+ uint64(targetBytesPerShard), -+ ) -+ require.Equal(t, tc.exp.Statistics, got.Statistics) -+ require.Equal(t, tc.exp.ChunkGroups, got.ChunkGroups) -+ require.Equal(t, tc.exp.Statistics.Index.TotalChunks, got.Statistics.Index.TotalChunks) -+ for i, shard := range tc.exp.Shards { -+ require.Equal(t, shard, got.Shards[i], ""shard %d"", i) -+ } -+ }) -+ } -+} -diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/sharding/power.go b/pkg/storage/stores/shipper/indexshipper/tsdb/sharding/power.go -index 257c198ee2d75..219563e0e5358 100644 ---- a/pkg/storage/stores/shipper/indexshipper/tsdb/sharding/power.go -+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/sharding/power.go -@@ -22,21 +22,7 @@ type PowerOfTwoSharding struct { - - func (p PowerOfTwoSharding) ShardsFor(bytes uint64, maxBytesPerShard uint64) []logproto.Shard { - factor := GuessShardFactor(bytes, maxBytesPerShard, p.MaxShards) -- -- if factor < 2 { -- return []logproto.Shard{{ -- Bounds: logproto.FPBounds{ -- Min: 0, -- Max: math.MaxUint64, -- }, -- Stats: &stats.Stats{ -- Bytes: bytes, -- }, -- }} -- } -- - return LinearShards(factor, bytes) -- - } - - // LinearShards is a sharding implementation that splits the data into -@@ -71,14 +57,13 @@ func LinearShards(n int, bytes uint64) []logproto.Shard { - Bytes: bytesPerShard, - }, - } -- -- // The last shard should have the remainder of the bytes -- // and the max bound should be math.MaxUint64 -- // NB(owen-d): this can only happen when maxShards is used -- // and the maxShards isn't a factor of 2 -- shards[len(shards)-1].Stats.Bytes += bytes % uint64(n) -- shards[len(shards)-1].Bounds.Max = math.MaxUint64 - } -+ // The last shard should have the remainder of the bytes -+ // and the max bound should be math.MaxUint64 -+ // NB(owen-d): this can only happen when maxShards is used -+ // and the maxShards isn't a factor of 2 -+ shards[len(shards)-1].Stats.Bytes += bytes % uint64(n) -+ shards[len(shards)-1].Bounds.Max = math.MaxUint64 - - return shards - -diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go -index 0159197831645..036f5660c0929 100644 ---- a/pkg/validation/limits.go -+++ b/pkg/validation/limits.go -@@ -480,6 +480,10 @@ func (l *Limits) Validate() error { - return err - } - -+ if l.TSDBMaxBytesPerShard <= 0 { -+ return errors.New(""querier.tsdb-max-bytes-per-shard must be greater than 0"") -+ } -+ - return nil - } - -diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go -index 598a6f9033cde..2d4457c2a1191 100644 ---- a/pkg/validation/limits_test.go -+++ 
b/pkg/validation/limits_test.go -@@ -345,6 +345,7 @@ func TestLimitsValidation(t *testing.T) { - desc := fmt.Sprintf(""%s/%s"", tc.limits.DeletionMode, tc.limits.BloomBlockEncoding) - t.Run(desc, func(t *testing.T) { - tc.limits.TSDBShardingStrategy = logql.PowerOfTwoVersion.String() // hacky but needed for test -+ tc.limits.TSDBMaxBytesPerShard = DefaultTSDBMaxBytesPerShard - if tc.expected == nil { - require.NoError(t, tc.limits.Validate()) - } else {",feat,ingester aware bounded impl (#12840) -1b251e54a48e0b5861d1e3f1d601ba90b5b9ba48,2019-10-28 22:51:27,Björn Rabenstein,"Simplify regexp's (#1202) - -This is mostly to remove redundant anchoring (as it makes people -believe that Prometheus's regexp's aren't anchored by default, which -will backfire badly at some point), but it also removes useless -grouping (again to avoid confusion). - -Signed-off-by: beorn7 ",False,"diff --git a/docs/clients/promtail/scraping.md b/docs/clients/promtail/scraping.md -index cc1d4d23d73d5..2c5432e7ef76b 100644 ---- a/docs/clients/promtail/scraping.md -+++ b/docs/clients/promtail/scraping.md -@@ -97,7 +97,7 @@ value or transformed to a final external label, such as `__job__`. - * Drop the target if a label (`__service__` in the example) is empty: - ```yaml - - action: drop -- regex: ^$ -+ regex: '' - source_labels: - - __service__ - ``` -diff --git a/production/helm/promtail/templates/configmap.yaml b/production/helm/promtail/templates/configmap.yaml -index debc1480e9504..211279538f61c 100644 ---- a/production/helm/promtail/templates/configmap.yaml -+++ b/production/helm/promtail/templates/configmap.yaml -@@ -28,7 +28,7 @@ data: - - __meta_kubernetes_pod_node_name - target_label: __host__ - - action: drop -- regex: ^$ -+ regex: '' - source_labels: - - __service__ - - action: labelmap -@@ -75,7 +75,7 @@ data: - - __meta_kubernetes_pod_node_name - target_label: __host__ - - action: drop -- regex: ^$ -+ regex: '' - source_labels: - - __service__ - - action: labelmap -@@ -118,7 +118,7 @@ data: - - __meta_kubernetes_pod_label_name - - __meta_kubernetes_pod_label_app - - action: drop -- regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ -+ regex: '[0-9a-z-.]+-[0-9a-f]{8,10}' - source_labels: - - __meta_kubernetes_pod_controller_name - - source_labels: -@@ -128,7 +128,7 @@ data: - - __meta_kubernetes_pod_node_name - target_label: __host__ - - action: drop -- regex: ^$ -+ regex: '' - source_labels: - - __service__ - - action: labelmap -@@ -171,11 +171,11 @@ data: - - __meta_kubernetes_pod_label_name - - __meta_kubernetes_pod_label_app - - action: keep -- regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ -+ regex: '[0-9a-z-.]+-[0-9a-f]{8,10}' - source_labels: - - __meta_kubernetes_pod_controller_name - - action: replace -- regex: ^([0-9a-z-.]+)(-[0-9a-f]{8,10})$ -+ regex: '([0-9a-z-.]+)-[0-9a-f]{8,10}' - source_labels: - - __meta_kubernetes_pod_controller_name - target_label: __service__ -@@ -183,7 +183,7 @@ data: - - __meta_kubernetes_pod_node_name - target_label: __host__ - - action: drop -- regex: ^$ -+ regex: '' - source_labels: - - __service__ - - action: labelmap -@@ -220,7 +220,7 @@ data: - - role: pod - relabel_configs: - - action: drop -- regex: ^$ -+ regex: '' - source_labels: - - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror - - action: replace -@@ -231,7 +231,7 @@ data: - - __meta_kubernetes_pod_node_name - target_label: __host__ - - action: drop -- regex: ^$ -+ regex: '' - source_labels: - - __service__ - - action: labelmap -diff --git a/production/ksonnet/promtail/scrape_config.libsonnet 
b/production/ksonnet/promtail/scrape_config.libsonnet -index ad590a1d7877a..ff4a1bb2d493b 100644 ---- a/production/ksonnet/promtail/scrape_config.libsonnet -+++ b/production/ksonnet/promtail/scrape_config.libsonnet -@@ -20,7 +20,7 @@ config + { - { - source_labels: ['__service__'], - action: 'drop', -- regex: '^$', -+ regex: '', - }, - - // Include all the other labels on the pod. -@@ -122,7 +122,7 @@ config + { - { - source_labels: ['__meta_kubernetes_pod_controller_name'], - action: 'drop', -- regex: '^([0-9a-z-.]+)(-[0-9a-f]{8,10})$', -+ regex: '[0-9a-z-.]+-[0-9a-f]{8,10}', - }, - - // Use controller name as __service__. -@@ -149,15 +149,15 @@ config + { - // Drop pods not from an indirect controller. eg StatefulSets, DaemonSets - { - source_labels: ['__meta_kubernetes_pod_controller_name'], -- regex: '^([0-9a-z-.]+)(-[0-9a-f]{8,10})$', -+ regex: '[0-9a-z-.]+-[0-9a-f]{8,10}', - action: 'keep', - }, - -- // put the indirect controller name into a temp label. -+ // Put the indirect controller name into a temp label. - { - source_labels: ['__meta_kubernetes_pod_controller_name'], - action: 'replace', -- regex: '^([0-9a-z-.]+)(-[0-9a-f]{8,10})$', -+ regex: '([0-9a-z-.]+)-[0-9a-f]{8,10}', - target_label: '__service__', - }, - ] -@@ -171,7 +171,7 @@ config + { - { - action: 'drop', - source_labels: ['__meta_kubernetes_pod_annotation_kubernetes_io_config_mirror'], -- regex: '^$', -+ regex: '', - }, - - // Static control plane pods usually have a component label that identifies them",unknown,"Simplify regexp's (#1202) - -This is mostly to remove redundant anchoring (as it makes people -believe that Prometheus's regexp's aren't anchored by default, which -will backfire badly at some point), but it also removes useless -grouping (again to avoid confusion). - -Signed-off-by: beorn7 " -39e9c14611844ca133b9baa363b0282528279f33,2022-02-18 02:59:22,Josh Risley,"Use output plugin grafana-loki (#5386) - -Reflects name change in v2.1.0. - -- If an ECS user configures `""Name"": ""loki""` then the native output plugin will be used and the task configuration crashes in ECS.",False,"diff --git a/docs/sources/clients/aws/ecs/_index.md b/docs/sources/clients/aws/ecs/_index.md -index fcf53387bed95..b9f2db1f94548 100644 ---- a/docs/sources/clients/aws/ecs/_index.md -+++ b/docs/sources/clients/aws/ecs/_index.md -@@ -134,8 +134,8 @@ The `log_router` container image is the [Fluent bit Loki docker image][fluentbit - ""logConfiguration"": { - ""logDriver"": ""awsfirelens"", - ""options"": { -- ""Name"": ""loki"", -- ""Url"": ""https://:@logs-prod-us-central1.grafana.net/loki/api/v1/push"", -+ ""Name"": ""grafana-loki"", -+ ""Url"": ""https://:@/loki/api/v1/push"", - ""Labels"": ""{job=\""firelens\""}"", - ""RemoveKeys"": ""container_id,ecs_task_arn"", - ""LabelKeys"": ""container_name,ecs_task_definition,source,ecs_cluster"",",unknown,"Use output plugin grafana-loki (#5386) - -Reflects name change in v2.1.0. - -- If an ECS user configures `""Name"": ""loki""` then the native output plugin will be used and the task configuration crashes in ECS." -76c6681feb47910856430da795c55a45e9886dd6,2023-04-18 03:59:23,Trevor Whitney,"Add release notes appender action (#9063) - -**What this PR does / why we need it**: - -This PR aims to remove the manual process required to create release -notes during a release. It will allow contributors to add the label -`add-to-release-notes` to any PR. 
When that PR is merged, this action -will create another PR appending the original PR's # and title to the -release notes for the next release. This second PR will give the author -an opportunity to add a description to their addition, as well as give -maintainers an opportunity to discuss it's relevance in the release -notes.",False,"diff --git a/.github/workflows/append-release-notes.yml b/.github/workflows/append-release-notes.yml -new file mode 100644 -index 0000000000000..a512d6e18bacc ---- /dev/null -+++ b/.github/workflows/append-release-notes.yml -@@ -0,0 +1,27 @@ -+--- -+name: Append to Release Notes -+on: -+ pull_request_target: -+ types: -+ - closed -+ - labeled -+ -+jobs: -+ main: -+ runs-on: ubuntu-latest -+ steps: -+ - name: Checkout Actions -+ uses: actions/checkout@v3 -+ with: -+ repository: ""grafana/grafana-github-actions"" -+ path: ./actions -+ ref: main -+ - name: Install Actions -+ run: npm install --production --prefix ./actions -+ - name: Run release notes appender -+ uses: ./actions/release-notes-appender -+ with: -+ metricsWriteAPIKey: ${{secrets.GRAFANA_MISC_STATS_API_KEY}} -+ token: ${{secrets.GH_BOT_ACCESS_TOKEN}} -+ labelsToAdd: ""release-notes"" -+ releaseNotesFile: docs/sources/release-notes/next.md -diff --git a/docs/sources/release-notes/next.md b/docs/sources/release-notes/next.md -new file mode 100644 -index 0000000000000..fc4264cd30795 ---- /dev/null -+++ b/docs/sources/release-notes/next.md -@@ -0,0 +1,13 @@ -+--- -+title: V?.? -+description: Version ?.? release notes -+weight: 100000 -+--- -+ -+# V?.? -+Grafana Labs is excited to announce the release of Loki ?.?. Here's a summary of new enhancements and important fixes: -+ -+:warning: This a placeholder for the next release. Please clean up all features listed below -+ -+## Features and enhancements -+",unknown,"Add release notes appender action (#9063) - -**What this PR does / why we need it**: - -This PR aims to remove the manual process required to create release -notes during a release. It will allow contributors to add the label -`add-to-release-notes` to any PR. When that PR is merged, this action -will create another PR appending the original PR's # and title to the -release notes for the next release. This second PR will give the author -an opportunity to add a description to their addition, as well as give -maintainers an opportunity to discuss it's relevance in the release -notes." -d3c9cec22891b45ed1cb93a9eacc5dad6a117fc5,2024-05-23 05:29:28,Quentin Bisson,"fix: upgrade old plugin for the loki-operational dashboard. 
(#13016) - -Signed-off-by: QuentinBisson ",False,"diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json -index d9f668ed88a3f..911e9f7267d7c 100644 ---- a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json -+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json -@@ -1657,9 +1657,93 @@ - } - }, - { -- ""columns"": [ ], - ""datasource"": ""$datasource"", -- ""fontSize"": ""100%"", -+ ""fieldConfig"": { -+ ""defaults"": { -+ ""color"": { -+ ""mode"": ""thresholds"" -+ }, -+ ""custom"": { -+ ""align"": ""right"", -+ ""cellOptions"": { -+ ""type"": ""auto"" -+ }, -+ ""inspect"": false -+ }, -+ ""decimals"": 2, -+ ""displayName"": """", -+ ""mappings"": [ ], -+ ""thresholds"": { -+ ""mode"": ""absolute"", -+ ""steps"": [ -+ { -+ ""color"": ""green"", -+ ""value"": null -+ }, -+ { -+ ""color"": ""red"", -+ ""value"": 80 -+ } -+ ] -+ }, -+ ""unit"": ""short"" -+ }, -+ ""overrides"": [ -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""Time"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""displayName"", -+ ""value"": ""Time"" -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ }, -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""tenant"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""unit"", -+ ""value"": ""short"" -+ }, -+ { -+ ""id"": ""decimals"", -+ ""value"": 2 -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ }, -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""reason"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""unit"", -+ ""value"": ""short"" -+ }, -+ { -+ ""id"": ""decimals"", -+ ""value"": 2 -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ } -+ ] -+ }, - ""gridPos"": { - ""h"": 8, - ""w"": 12, -@@ -1667,71 +1751,20 @@ - ""y"": 27 - }, - ""id"": 113, -- ""pageSize"": null, -- ""panels"": [ ], -- ""showHeader"": true, -- ""sort"": { -- ""col"": 3, -- ""desc"": true -- }, -- ""styles"": [ -- { -- ""alias"": ""Time"", -- ""align"": ""auto"", -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""pattern"": ""Time"", -- ""type"": ""hidden"" -- }, -- { -- ""alias"": """", -- ""align"": ""auto"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -- ], -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""decimals"": 2, -- ""mappingType"": 1, -- ""pattern"": ""tenant"", -- ""thresholds"": [ ], -- ""type"": ""string"", -- ""unit"": ""short"" -- }, -- { -- ""alias"": """", -- ""align"": ""auto"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -+ ""options"": { -+ ""cellHeight"": ""sm"", -+ ""footer"": { -+ ""countRows"": false, -+ ""fields"": """", -+ ""reducer"": [ -+ ""sum"" - ], -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""decimals"": 2, -- ""mappingType"": 1, -- ""pattern"": ""reason"", -- ""thresholds"": [ ], -- ""type"": ""number"", -- ""unit"": ""short"" -+ ""show"": false - }, -- { -- ""alias"": """", -- ""align"": ""right"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -- ], -- ""decimals"": 2, -- ""pattern"": ""/.*/"", -- ""thresholds"": [ ], -- ""type"": ""number"", -- ""unit"": ""short"" -- } -- ], -+ ""showHeader"": true -+ }, -+ ""panels"": [ ], -+ ""pluginVersion"": ""10.4.0"", - ""targets"": [ - { - ""expr"": ""topk(10, sum by (tenant, reason) 
(sum_over_time(increase(loki_discarded_samples_total{cluster=\""$cluster\"",namespace=\""$namespace\""}[$__rate_interval])[$__range:$__rate_interval])))"", -@@ -1742,11 +1775,16 @@ - ""refId"": ""A"" - } - ], -- ""timeFrom"": null, -- ""timeShift"": null, - ""title"": ""Discarded Lines Per Interval"", -- ""transform"": ""table"", -- ""type"": ""table-old"" -+ ""transformations"": [ -+ { -+ ""id"": ""merge"", -+ ""options"": { -+ ""reducers"": [ ] -+ } -+ } -+ ], -+ ""type"": ""table"" - } - ], - ""targets"": [ ], -diff --git a/production/loki-mixin-compiled/dashboards/loki-operational.json b/production/loki-mixin-compiled/dashboards/loki-operational.json -index d677775d52ef8..48684c77e5d9b 100644 ---- a/production/loki-mixin-compiled/dashboards/loki-operational.json -+++ b/production/loki-mixin-compiled/dashboards/loki-operational.json -@@ -1754,9 +1754,93 @@ - } - }, - { -- ""columns"": [ ], - ""datasource"": ""$datasource"", -- ""fontSize"": ""100%"", -+ ""fieldConfig"": { -+ ""defaults"": { -+ ""color"": { -+ ""mode"": ""thresholds"" -+ }, -+ ""custom"": { -+ ""align"": ""right"", -+ ""cellOptions"": { -+ ""type"": ""auto"" -+ }, -+ ""inspect"": false -+ }, -+ ""decimals"": 2, -+ ""displayName"": """", -+ ""mappings"": [ ], -+ ""thresholds"": { -+ ""mode"": ""absolute"", -+ ""steps"": [ -+ { -+ ""color"": ""green"", -+ ""value"": null -+ }, -+ { -+ ""color"": ""red"", -+ ""value"": 80 -+ } -+ ] -+ }, -+ ""unit"": ""short"" -+ }, -+ ""overrides"": [ -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""Time"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""displayName"", -+ ""value"": ""Time"" -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ }, -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""tenant"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""unit"", -+ ""value"": ""short"" -+ }, -+ { -+ ""id"": ""decimals"", -+ ""value"": 2 -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ }, -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""reason"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""unit"", -+ ""value"": ""short"" -+ }, -+ { -+ ""id"": ""decimals"", -+ ""value"": 2 -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ } -+ ] -+ }, - ""gridPos"": { - ""h"": 8, - ""w"": 12, -@@ -1764,71 +1848,20 @@ - ""y"": 27 - }, - ""id"": 113, -- ""pageSize"": null, -- ""panels"": [ ], -- ""showHeader"": true, -- ""sort"": { -- ""col"": 3, -- ""desc"": true -- }, -- ""styles"": [ -- { -- ""alias"": ""Time"", -- ""align"": ""auto"", -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""pattern"": ""Time"", -- ""type"": ""hidden"" -- }, -- { -- ""alias"": """", -- ""align"": ""auto"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -- ], -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""decimals"": 2, -- ""mappingType"": 1, -- ""pattern"": ""tenant"", -- ""thresholds"": [ ], -- ""type"": ""string"", -- ""unit"": ""short"" -- }, -- { -- ""alias"": """", -- ""align"": ""auto"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -+ ""options"": { -+ ""cellHeight"": ""sm"", -+ ""footer"": { -+ ""countRows"": false, -+ ""fields"": """", -+ ""reducer"": [ -+ ""sum"" - ], -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""decimals"": 2, -- ""mappingType"": 1, -- ""pattern"": ""reason"", -- ""thresholds"": [ ], -- ""type"": ""number"", -- ""unit"": ""short"" -+ ""show"": false - }, -- { -- ""alias"": """", -- 
""align"": ""right"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -- ], -- ""decimals"": 2, -- ""pattern"": ""/.*/"", -- ""thresholds"": [ ], -- ""type"": ""number"", -- ""unit"": ""short"" -- } -- ], -+ ""showHeader"": true -+ }, -+ ""panels"": [ ], -+ ""pluginVersion"": ""10.4.0"", - ""targets"": [ - { - ""expr"": ""topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\""$cluster\"",namespace=\""$namespace\""}[$__rate_interval])[$__range:$__rate_interval])))"", -@@ -1839,11 +1872,16 @@ - ""refId"": ""A"" - } - ], -- ""timeFrom"": null, -- ""timeShift"": null, - ""title"": ""Discarded Lines Per Interval"", -- ""transform"": ""table"", -- ""type"": ""table-old"" -+ ""transformations"": [ -+ { -+ ""id"": ""merge"", -+ ""options"": { -+ ""reducers"": [ ] -+ } -+ } -+ ], -+ ""type"": ""table"" - } - ], - ""targets"": [ ], -diff --git a/production/loki-mixin/dashboards/dashboard-loki-operational.json b/production/loki-mixin/dashboards/dashboard-loki-operational.json -index 3f215c2e90833..da51f8612ed8c 100644 ---- a/production/loki-mixin/dashboards/dashboard-loki-operational.json -+++ b/production/loki-mixin/dashboards/dashboard-loki-operational.json -@@ -1741,9 +1741,93 @@ - } - }, - { -- ""columns"": [], - ""datasource"": ""$datasource"", -- ""fontSize"": ""100%"", -+ ""fieldConfig"": { -+ ""defaults"": { -+ ""color"": { -+ ""mode"": ""thresholds"" -+ }, -+ ""custom"": { -+ ""align"": ""right"", -+ ""cellOptions"": { -+ ""type"": ""auto"" -+ }, -+ ""inspect"": false -+ }, -+ ""decimals"": 2, -+ ""displayName"": """", -+ ""mappings"": [], -+ ""thresholds"": { -+ ""mode"": ""absolute"", -+ ""steps"": [ -+ { -+ ""color"": ""green"", -+ ""value"": null -+ }, -+ { -+ ""color"": ""red"", -+ ""value"": 80 -+ } -+ ] -+ }, -+ ""unit"": ""short"" -+ }, -+ ""overrides"": [ -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""Time"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""displayName"", -+ ""value"": ""Time"" -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ }, -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""tenant"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""unit"", -+ ""value"": ""short"" -+ }, -+ { -+ ""id"": ""decimals"", -+ ""value"": 2 -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ }, -+ { -+ ""matcher"": { -+ ""id"": ""byName"", -+ ""options"": ""reason"" -+ }, -+ ""properties"": [ -+ { -+ ""id"": ""unit"", -+ ""value"": ""short"" -+ }, -+ { -+ ""id"": ""decimals"", -+ ""value"": 2 -+ }, -+ { -+ ""id"": ""custom.align"" -+ } -+ ] -+ } -+ ] -+ }, - ""gridPos"": { - ""h"": 8, - ""w"": 12, -@@ -1751,70 +1835,20 @@ - ""y"": 27 - }, - ""id"": 113, -- ""pageSize"": null, -- ""showHeader"": true, -- ""sort"": { -- ""col"": 3, -- ""desc"": true -- }, -- ""styles"": [ -- { -- ""alias"": ""Time"", -- ""align"": ""auto"", -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""pattern"": ""Time"", -- ""type"": ""hidden"" -- }, -- { -- ""alias"": """", -- ""align"": ""auto"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -- ], -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""decimals"": 2, -- ""mappingType"": 1, -- ""pattern"": ""tenant"", -- ""thresholds"": [], -- ""type"": ""string"", -- ""unit"": ""short"" -- }, -- { -- ""alias"": """", -- ""align"": ""auto"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 
0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -+ ""options"": { -+ ""cellHeight"": ""sm"", -+ ""footer"": { -+ ""countRows"": false, -+ ""fields"": """", -+ ""reducer"": [ -+ ""sum"" - ], -- ""dateFormat"": ""YYYY-MM-DD HH:mm:ss"", -- ""decimals"": 2, -- ""mappingType"": 1, -- ""pattern"": ""reason"", -- ""thresholds"": [], -- ""type"": ""number"", -- ""unit"": ""short"" -+ ""show"": false - }, -- { -- ""alias"": """", -- ""align"": ""right"", -- ""colorMode"": null, -- ""colors"": [ -- ""rgba(245, 54, 54, 0.9)"", -- ""rgba(237, 129, 40, 0.89)"", -- ""rgba(50, 172, 45, 0.97)"" -- ], -- ""decimals"": 2, -- ""pattern"": ""/.*/"", -- ""thresholds"": [], -- ""type"": ""number"", -- ""unit"": ""short"" -- } -- ], -+ ""showHeader"": true -+ }, -+ ""panels"": [], -+ ""pluginVersion"": ""10.4.0"", - ""targets"": [ - { - ""expr"": ""topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\""$cluster\"",namespace=\""$namespace\""}[$__rate_interval])[$__range:$__rate_interval])))"", -@@ -1825,11 +1859,16 @@ - ""refId"": ""A"" - } - ], -- ""timeFrom"": null, -- ""timeShift"": null, - ""title"": ""Discarded Lines Per Interval"", -- ""transform"": ""table"", -- ""type"": ""table-old"" -+ ""transformations"": [ -+ { -+ ""id"": ""merge"", -+ ""options"": { -+ ""reducers"": [] -+ } -+ } -+ ], -+ ""type"": ""table"" - } - ], - ""title"": ""Limits"", -@@ -1852,7 +1891,6 @@ - ""dashLength"": 10, - ""dashes"": false, - ""datasource"": ""$datasource"", -- - ""fill"": 1, - ""fillGradient"": 0, - ""gridPos"": { -@@ -2244,7 +2282,7 @@ - ""dashes"": false, - ""datasource"": ""$datasource"", - ""fieldConfig"": { -- ""defaults"": { -+ ""defaults"": { - ""unit"": ""binBps"" - } - },",fix,"upgrade old plugin for the loki-operational dashboard. (#13016) - -Signed-off-by: QuentinBisson " -50ca4d51868cdddc4889f808bfb614fcda5db20d,2022-01-24 21:26:01,Susana Ferreira,Addition of cortex' queryrange tests (#5183),False,"diff --git a/pkg/querier/queryrange/queryrangebase/limits_test.go b/pkg/querier/queryrange/queryrangebase/limits_test.go -new file mode 100644 -index 0000000000000..381d35b047678 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/limits_test.go -@@ -0,0 +1,217 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/cortexproject/cortex/pkg/util"" -+ ""github.com/stretchr/testify/assert"" -+ ""github.com/stretchr/testify/mock"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/user"" -+) -+ -+func TestLimitsMiddleware_MaxQueryLookback(t *testing.T) { -+ const ( -+ thirtyDays = 30 * 24 * time.Hour -+ ) -+ -+ now := time.Now() -+ -+ tests := map[string]struct { -+ maxQueryLookback time.Duration -+ reqStartTime time.Time -+ reqEndTime time.Time -+ expectedSkipped bool -+ expectedStartTime time.Time -+ expectedEndTime time.Time -+ }{ -+ ""should not manipulate time range if max lookback is disabled"": { -+ maxQueryLookback: 0, -+ reqStartTime: time.Unix(0, 0), -+ reqEndTime: now, -+ expectedStartTime: time.Unix(0, 0), -+ expectedEndTime: now, -+ }, -+ ""should not manipulate time range for a query on short time range"": { -+ maxQueryLookback: thirtyDays, -+ reqStartTime: now.Add(-time.Hour), -+ reqEndTime: now, -+ expectedStartTime: now.Add(-time.Hour), -+ expectedEndTime: now, -+ }, -+ ""should not manipulate a query on large time range close to the limit"": { -+ maxQueryLookback: thirtyDays, -+ reqStartTime: now.Add(-thirtyDays).Add(time.Hour), -+ reqEndTime: now, -+ expectedStartTime: 
now.Add(-thirtyDays).Add(time.Hour), -+ expectedEndTime: now, -+ }, -+ ""should manipulate a query on large time range over the limit"": { -+ maxQueryLookback: thirtyDays, -+ reqStartTime: now.Add(-thirtyDays).Add(-100 * time.Hour), -+ reqEndTime: now, -+ expectedStartTime: now.Add(-thirtyDays), -+ expectedEndTime: now, -+ }, -+ ""should skip executing a query outside the allowed time range"": { -+ maxQueryLookback: thirtyDays, -+ reqStartTime: now.Add(-thirtyDays).Add(-100 * time.Hour), -+ reqEndTime: now.Add(-thirtyDays).Add(-90 * time.Hour), -+ expectedSkipped: true, -+ }, -+ } -+ -+ for testName, testData := range tests { -+ t.Run(testName, func(t *testing.T) { -+ req := &PrometheusRequest{ -+ Start: util.TimeToMillis(testData.reqStartTime), -+ End: util.TimeToMillis(testData.reqEndTime), -+ } -+ -+ limits := mockLimits{maxQueryLookback: testData.maxQueryLookback} -+ middleware := NewLimitsMiddleware(limits) -+ -+ innerRes := NewEmptyPrometheusResponse() -+ inner := &mockHandler{} -+ inner.On(""Do"", mock.Anything, mock.Anything).Return(innerRes, nil) -+ -+ ctx := user.InjectOrgID(context.Background(), ""test"") -+ outer := middleware.Wrap(inner) -+ res, err := outer.Do(ctx, req) -+ require.NoError(t, err) -+ -+ if testData.expectedSkipped { -+ // We expect an empty response, but not the one returned by the inner handler -+ // which we expect has been skipped. -+ assert.NotSame(t, innerRes, res) -+ assert.Len(t, inner.Calls, 0) -+ } else { -+ // We expect the response returned by the inner handler. -+ assert.Same(t, innerRes, res) -+ -+ // Assert on the time range of the request passed to the inner handler (5s delta). -+ delta := float64(5000) -+ require.Len(t, inner.Calls, 1) -+ assert.InDelta(t, util.TimeToMillis(testData.expectedStartTime), inner.Calls[0].Arguments.Get(1).(Request).GetStart(), delta) -+ assert.InDelta(t, util.TimeToMillis(testData.expectedEndTime), inner.Calls[0].Arguments.Get(1).(Request).GetEnd(), delta) -+ } -+ }) -+ } -+} -+ -+func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { -+ const ( -+ thirtyDays = 30 * 24 * time.Hour -+ ) -+ -+ now := time.Now() -+ -+ tests := map[string]struct { -+ maxQueryLength time.Duration -+ reqStartTime time.Time -+ reqEndTime time.Time -+ expectedErr string -+ }{ -+ ""should skip validation if max length is disabled"": { -+ maxQueryLength: 0, -+ reqStartTime: time.Unix(0, 0), -+ reqEndTime: now, -+ }, -+ ""should succeed on a query on short time range, ending now"": { -+ maxQueryLength: thirtyDays, -+ reqStartTime: now.Add(-time.Hour), -+ reqEndTime: now, -+ }, -+ ""should succeed on a query on short time range, ending in the past"": { -+ maxQueryLength: thirtyDays, -+ reqStartTime: now.Add(-2 * thirtyDays).Add(-time.Hour), -+ reqEndTime: now.Add(-2 * thirtyDays), -+ }, -+ ""should succeed on a query on large time range close to the limit, ending now"": { -+ maxQueryLength: thirtyDays, -+ reqStartTime: now.Add(-thirtyDays).Add(time.Hour), -+ reqEndTime: now, -+ }, -+ ""should fail on a query on large time range over the limit, ending now"": { -+ maxQueryLength: thirtyDays, -+ reqStartTime: now.Add(-thirtyDays).Add(-100 * time.Hour), -+ reqEndTime: now, -+ expectedErr: ""the query time range exceeds the limit"", -+ }, -+ ""should fail on a query on large time range over the limit, ending in the past"": { -+ maxQueryLength: thirtyDays, -+ reqStartTime: now.Add(-4 * thirtyDays), -+ reqEndTime: now.Add(-2 * thirtyDays), -+ expectedErr: ""the query time range exceeds the limit"", -+ }, -+ } -+ -+ for testName, testData := range 
tests { -+ t.Run(testName, func(t *testing.T) { -+ req := &PrometheusRequest{ -+ Start: util.TimeToMillis(testData.reqStartTime), -+ End: util.TimeToMillis(testData.reqEndTime), -+ } -+ -+ limits := mockLimits{maxQueryLength: testData.maxQueryLength} -+ middleware := NewLimitsMiddleware(limits) -+ -+ innerRes := NewEmptyPrometheusResponse() -+ inner := &mockHandler{} -+ inner.On(""Do"", mock.Anything, mock.Anything).Return(innerRes, nil) -+ -+ ctx := user.InjectOrgID(context.Background(), ""test"") -+ outer := middleware.Wrap(inner) -+ res, err := outer.Do(ctx, req) -+ -+ if testData.expectedErr != """" { -+ require.Error(t, err) -+ assert.Contains(t, err.Error(), testData.expectedErr) -+ assert.Nil(t, res) -+ assert.Len(t, inner.Calls, 0) -+ } else { -+ // We expect the response returned by the inner handler. -+ require.NoError(t, err) -+ assert.Same(t, innerRes, res) -+ -+ // The time range of the request passed to the inner handler should have not been manipulated. -+ require.Len(t, inner.Calls, 1) -+ assert.Equal(t, util.TimeToMillis(testData.reqStartTime), inner.Calls[0].Arguments.Get(1).(Request).GetStart()) -+ assert.Equal(t, util.TimeToMillis(testData.reqEndTime), inner.Calls[0].Arguments.Get(1).(Request).GetEnd()) -+ } -+ }) -+ } -+} -+ -+type mockLimits struct { -+ maxQueryLookback time.Duration -+ maxQueryLength time.Duration -+ maxCacheFreshness time.Duration -+} -+ -+func (m mockLimits) MaxQueryLookback(string) time.Duration { -+ return m.maxQueryLookback -+} -+ -+func (m mockLimits) MaxQueryLength(string) time.Duration { -+ return m.maxQueryLength -+} -+ -+func (mockLimits) MaxQueryParallelism(string) int { -+ return 14 // Flag default. -+} -+ -+func (m mockLimits) MaxCacheFreshness(string) time.Duration { -+ return m.maxCacheFreshness -+} -+ -+type mockHandler struct { -+ mock.Mock -+} -+ -+func (m *mockHandler) Do(ctx context.Context, req Request) (Response, error) { -+ args := m.Called(ctx, req) -+ return args.Get(0).(Response), args.Error(1) -+} -diff --git a/pkg/querier/queryrange/queryrangebase/marshaling_test.go b/pkg/querier/queryrange/queryrangebase/marshaling_test.go -new file mode 100644 -index 0000000000000..955735bcefd82 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/marshaling_test.go -@@ -0,0 +1,90 @@ -+package queryrangebase -+ -+import ( -+ ""bytes"" -+ ""context"" -+ ""io/ioutil"" -+ ""math/rand"" -+ ""net/http"" -+ ""testing"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/stretchr/testify/require"" -+) -+ -+func BenchmarkPrometheusCodec_DecodeResponse(b *testing.B) { -+ const ( -+ numSeries = 1000 -+ numSamplesPerSeries = 1000 -+ ) -+ -+ // Generate a mocked response and marshal it. -+ res := mockPrometheusResponse(numSeries, numSamplesPerSeries) -+ encodedRes, err := json.Marshal(res) -+ require.NoError(b, err) -+ b.Log(""test prometheus response size:"", len(encodedRes)) -+ -+ b.ResetTimer() -+ b.ReportAllocs() -+ -+ for n := 0; n < b.N; n++ { -+ _, err := PrometheusCodec.DecodeResponse(context.Background(), &http.Response{ -+ StatusCode: 200, -+ Body: ioutil.NopCloser(bytes.NewReader(encodedRes)), -+ ContentLength: int64(len(encodedRes)), -+ }, nil) -+ require.NoError(b, err) -+ } -+} -+ -+func BenchmarkPrometheusCodec_EncodeResponse(b *testing.B) { -+ const ( -+ numSeries = 1000 -+ numSamplesPerSeries = 1000 -+ ) -+ -+ // Generate a mocked response and marshal it. 
-+ res := mockPrometheusResponse(numSeries, numSamplesPerSeries) -+ -+ b.ResetTimer() -+ b.ReportAllocs() -+ -+ for n := 0; n < b.N; n++ { -+ _, err := PrometheusCodec.EncodeResponse(context.Background(), res) -+ require.NoError(b, err) -+ } -+} -+ -+func mockPrometheusResponse(numSeries, numSamplesPerSeries int) *PrometheusResponse { -+ stream := make([]SampleStream, numSeries) -+ for s := 0; s < numSeries; s++ { -+ // Generate random samples. -+ samples := make([]cortexpb.Sample, numSamplesPerSeries) -+ for i := 0; i < numSamplesPerSeries; i++ { -+ samples[i] = cortexpb.Sample{ -+ Value: rand.Float64(), -+ TimestampMs: int64(i), -+ } -+ } -+ -+ // Generate random labels. -+ lbls := make([]cortexpb.LabelAdapter, 10) -+ for i := range lbls { -+ lbls[i].Name = ""a_medium_size_label_name"" -+ lbls[i].Value = ""a_medium_size_label_value_that_is_used_to_benchmark_marshalling"" -+ } -+ -+ stream[s] = SampleStream{ -+ Labels: lbls, -+ Samples: samples, -+ } -+ } -+ -+ return &PrometheusResponse{ -+ Status: ""success"", -+ Data: PrometheusData{ -+ ResultType: ""vector"", -+ Result: stream, -+ }, -+ } -+} -diff --git a/pkg/querier/queryrange/queryrangebase/promql_test.go b/pkg/querier/queryrange/queryrangebase/promql_test.go -new file mode 100644 -index 0000000000000..3092d596f5085 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/promql_test.go -@@ -0,0 +1,685 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""fmt"" -+ ""math"" -+ ""sort"" -+ ""strings"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/prometheus/client_golang/prometheus"" -+ ""github.com/prometheus/prometheus/model/labels"" -+ ""github.com/prometheus/prometheus/promql"" -+ ""github.com/prometheus/prometheus/storage"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/querier/astmapper"" -+) -+ -+var ( -+ start = time.Unix(1000, 0) -+ end = start.Add(3 * time.Minute) -+ step = 30 * time.Second -+ ctx = context.Background() -+ engine = promql.NewEngine(promql.EngineOpts{ -+ Reg: prometheus.DefaultRegisterer, -+ Logger: log.NewNopLogger(), -+ Timeout: 1 * time.Hour, -+ MaxSamples: 10e6, -+ ActiveQueryTracker: nil, -+ }) -+) -+ -+// This test allows to verify which PromQL expressions can be parallelized. -+func Test_PromQL(t *testing.T) { -+ t.Parallel() -+ -+ var tests = []struct { -+ normalQuery string -+ shardQuery string -+ shouldEqual bool -+ }{ -+ // Vector can be parallelized but we need to remove the cortex shard label. -+ // It should be noted that the __cortex_shard__ label is required by the engine -+ // and therefore should be returned by the storage. -+ // Range vectors `bar1{baz=""blip""}[1m]` are not tested here because it is not supported -+ // by range queries. -+ { -+ `bar1{baz=""blip""}`, -+ `label_replace( -+ bar1{__cortex_shard__=""0_of_3"",baz=""blip""} or -+ bar1{__cortex_shard__=""1_of_3"",baz=""blip""} or -+ bar1{__cortex_shard__=""2_of_3"",baz=""blip""}, -+ ""__cortex_shard__"","""","""","""" -+ )`, -+ true, -+ }, -+ // __cortex_shard__ label is required otherwise the or will keep only the first series. 
-+ { -+ `sum(bar1{baz=""blip""})`, -+ `sum( -+ sum (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ sum (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ sum (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ false, -+ }, -+ { -+ `sum(bar1{baz=""blip""})`, -+ `sum( -+ sum without(__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ sum without(__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ sum without(__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `sum by (foo) (bar1{baz=""blip""})`, -+ `sum by (foo) ( -+ sum by(foo,__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ sum by(foo,__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ sum by(foo,__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `sum by (foo,bar) (bar1{baz=""blip""})`, -+ `sum by (foo,bar)( -+ sum by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ sum by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ sum by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ // since series are unique to a shard, it's safe to sum without shard first, then reaggregate -+ { -+ `sum without (foo,bar) (bar1{baz=""blip""})`, -+ `sum without (foo,bar)( -+ sum without(__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ sum without(__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ sum without(__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `min by (foo,bar) (bar1{baz=""blip""})`, -+ `min by (foo,bar)( -+ min by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ min by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ min by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `max by (foo,bar) (bar1{baz=""blip""})`, -+ ` max by (foo,bar)( -+ max by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ max by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ max by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ // avg generally cant be parallelized -+ { -+ `avg(bar1{baz=""blip""})`, -+ `avg( -+ avg by(__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ avg by(__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ avg by(__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ false, -+ }, -+ // stddev can't be parallelized. -+ { -+ `stddev(bar1{baz=""blip""})`, -+ ` stddev( -+ stddev by(__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ stddev by(__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ stddev by(__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ false, -+ }, -+ // stdvar can't be parallelized. 
-+ { -+ `stdvar(bar1{baz=""blip""})`, -+ `stdvar( -+ stdvar by(__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ stdvar by(__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ stdvar by(__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ false, -+ }, -+ { -+ `count(bar1{baz=""blip""})`, -+ `count( -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `count by (foo,bar) (bar1{baz=""blip""})`, -+ `count by (foo,bar) ( -+ count by (foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ count by (foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ count by (foo,bar,__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ // different ways to represent count without. -+ { -+ `count without (foo) (bar1{baz=""blip""})`, -+ `count without (foo) ( -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `count without (foo) (bar1{baz=""blip""})`, -+ `sum without (__cortex_shard__) ( -+ count without (foo) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ count without (foo) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ count without (foo) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `count without (foo, bar) (bar1{baz=""blip""})`, -+ `count without (foo, bar) ( -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""0_of_3"",baz=""blip""}) or -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""1_of_3"",baz=""blip""}) or -+ count without (__cortex_shard__) (bar1{__cortex_shard__=""2_of_3"",baz=""blip""}) -+ )`, -+ true, -+ }, -+ { -+ `topk(2,bar1{baz=""blip""})`, -+ `label_replace( -+ topk(2, -+ topk(2,(bar1{__cortex_shard__=""0_of_3"",baz=""blip""})) without(__cortex_shard__) or -+ topk(2,(bar1{__cortex_shard__=""1_of_3"",baz=""blip""})) without(__cortex_shard__) or -+ topk(2,(bar1{__cortex_shard__=""2_of_3"",baz=""blip""})) without(__cortex_shard__) -+ ), -+ ""__cortex_shard__"","""","""","""")`, -+ true, -+ }, -+ { -+ `bottomk(2,bar1{baz=""blip""})`, -+ `label_replace( -+ bottomk(2, -+ bottomk(2,(bar1{__cortex_shard__=""0_of_3"",baz=""blip""})) without(__cortex_shard__) or -+ bottomk(2,(bar1{__cortex_shard__=""1_of_3"",baz=""blip""})) without(__cortex_shard__) or -+ bottomk(2,(bar1{__cortex_shard__=""2_of_3"",baz=""blip""})) without(__cortex_shard__) -+ ), -+ ""__cortex_shard__"","""","""","""")`, -+ true, -+ }, -+ { -+ `sum by (foo,bar) (avg_over_time(bar1{baz=""blip""}[1m]))`, -+ `sum by (foo,bar)( -+ sum by(foo,bar,__cortex_shard__) (avg_over_time(bar1{__cortex_shard__=""0_of_3"",baz=""blip""}[1m])) or -+ sum by(foo,bar,__cortex_shard__) (avg_over_time(bar1{__cortex_shard__=""1_of_3"",baz=""blip""}[1m])) or -+ sum by(foo,bar,__cortex_shard__) (avg_over_time(bar1{__cortex_shard__=""2_of_3"",baz=""blip""}[1m])) -+ )`, -+ true, -+ }, -+ { -+ `sum by (foo,bar) (min_over_time(bar1{baz=""blip""}[1m]))`, -+ `sum by (foo,bar)( -+ sum by(foo,bar,__cortex_shard__) 
(min_over_time(bar1{__cortex_shard__=""0_of_3"",baz=""blip""}[1m])) or -+ sum by(foo,bar,__cortex_shard__) (min_over_time(bar1{__cortex_shard__=""1_of_3"",baz=""blip""}[1m])) or -+ sum by(foo,bar,__cortex_shard__) (min_over_time(bar1{__cortex_shard__=""2_of_3"",baz=""blip""}[1m])) -+ )`, -+ true, -+ }, -+ { -+ // Sub aggregations must avoid non-associative series merging across shards -+ `sum( -+ count( -+ bar1 -+ ) by (foo,bazz) -+ )`, -+ ` -+ sum without(__cortex_shard__) ( -+ sum by(__cortex_shard__) ( -+ count by(foo, bazz) (foo{__cortex_shard__=""0_of_2"",bar=""baz""}) -+ ) or -+ sum by(__cortex_shard__) ( -+ count by(foo, bazz) (foo{__cortex_shard__=""1_of_2"",bar=""baz""}) -+ ) -+ ) -+`, -+ false, -+ }, -+ { -+ // Note: this is a speculative optimization that we don't currently include due to mapping complexity. -+ // Certain sub aggregations may inject __cortex_shard__ for all (by) subgroupings. -+ // This is the same as the previous test with the exception that the shard label is injected to the count grouping -+ `sum( -+ count( -+ bar1 -+ ) by (foo,bazz) -+ )`, -+ ` -+ sum without(__cortex_shard__) ( -+ sum by(__cortex_shard__) ( -+ count by(foo, bazz, __cortex_shard__) (foo{__cortex_shard__=""0_of_2"",bar=""baz""}) -+ ) or -+ sum by(__cortex_shard__) ( -+ count by(foo, bazz, __cortex_shard__) (foo{__cortex_shard__=""1_of_2"",bar=""baz""}) -+ ) -+ ) -+`, -+ true, -+ }, -+ { -+ // Note: this is a speculative optimization that we don't currently include due to mapping complexity -+ // This example details multiple layers of aggregations. -+ // Sub aggregations must inject __cortex_shard__ for all (by) subgroupings. -+ `sum( -+ count( -+ count( -+ bar1 -+ ) by (foo,bazz) -+ ) by (bazz) -+ )`, -+ ` -+ sum without(__cortex_shard__) ( -+ sum by(__cortex_shard__) ( -+ count by(bazz, __cortex_shard__) ( -+ count by(foo, bazz, __cortex_shard__) ( -+ foo{__cortex_shard__=""0_of_2"", bar=""baz""} -+ ) -+ ) -+ ) or -+ sum by(__cortex_shard__) ( -+ count by(bazz, __cortex_shard__) ( -+ count by(foo, bazz, __cortex_shard__) ( -+ foo{__cortex_shard__=""1_of_2"", bar=""baz""} -+ ) -+ ) -+ ) -+ ) -+`, -+ true, -+ }, -+ } -+ -+ for _, tt := range tests { -+ tt := tt -+ t.Run(tt.normalQuery, func(t *testing.T) { -+ -+ baseQuery, err := engine.NewRangeQuery(shardAwareQueryable, tt.normalQuery, start, end, step) -+ require.Nil(t, err) -+ shardQuery, err := engine.NewRangeQuery(shardAwareQueryable, tt.shardQuery, start, end, step) -+ require.Nil(t, err) -+ baseResult := baseQuery.Exec(ctx) -+ shardResult := shardQuery.Exec(ctx) -+ t.Logf(""base: %v\n"", baseResult) -+ t.Logf(""shard: %v\n"", shardResult) -+ if tt.shouldEqual { -+ require.Equal(t, baseResult, shardResult) -+ return -+ } -+ require.NotEqual(t, baseResult, shardResult) -+ }) -+ } -+ -+} -+ -+func Test_FunctionParallelism(t *testing.T) { -+ tpl := `sum((bar1{}))` -+ shardTpl := `sum( -+ sum without(__cortex_shard__) ((bar1{__cortex_shard__=""0_of_3""})) or -+ sum without(__cortex_shard__) ((bar1{__cortex_shard__=""1_of_3""})) or -+ sum without(__cortex_shard__) ((bar1{__cortex_shard__=""2_of_3""})) -+ )` -+ -+ mkQuery := func(tpl, fn string, testMatrix bool, fArgs []string) (result string) { -+ result = strings.Replace(tpl, """", fn, -1) -+ -+ if testMatrix { -+ // turn selectors into ranges -+ result = strings.Replace(result, ""}"", ""}[1m]"", -1) -+ } -+ -+ if len(fArgs) > 0 { -+ args := "","" + strings.Join(fArgs, "","") -+ result = strings.Replace(result, """", args, -1) -+ } else { -+ result = strings.Replace(result, """", """", -1) 
-+ } -+ -+ return result -+ } -+ -+ for _, tc := range []struct { -+ fn string -+ fArgs []string -+ isTestMatrix bool -+ approximate bool -+ }{ -+ { -+ fn: ""abs"", -+ }, -+ { -+ fn: ""avg_over_time"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""ceil"", -+ }, -+ { -+ fn: ""changes"", -+ isTestMatrix: true, -+ }, -+ { -+ fn: ""count_over_time"", -+ isTestMatrix: true, -+ }, -+ { -+ fn: ""days_in_month"", -+ }, -+ { -+ fn: ""day_of_month"", -+ }, -+ { -+ fn: ""day_of_week"", -+ }, -+ { -+ fn: ""delta"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""deriv"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""exp"", -+ approximate: true, -+ }, -+ { -+ fn: ""floor"", -+ }, -+ { -+ fn: ""hour"", -+ }, -+ { -+ fn: ""idelta"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""increase"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""irate"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""ln"", -+ approximate: true, -+ }, -+ { -+ fn: ""log10"", -+ approximate: true, -+ }, -+ { -+ fn: ""log2"", -+ approximate: true, -+ }, -+ { -+ fn: ""max_over_time"", -+ isTestMatrix: true, -+ }, -+ { -+ fn: ""min_over_time"", -+ isTestMatrix: true, -+ }, -+ { -+ fn: ""minute"", -+ }, -+ { -+ fn: ""month"", -+ }, -+ { -+ fn: ""rate"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""resets"", -+ isTestMatrix: true, -+ }, -+ { -+ fn: ""sort"", -+ }, -+ { -+ fn: ""sort_desc"", -+ }, -+ { -+ fn: ""sqrt"", -+ approximate: true, -+ }, -+ { -+ fn: ""stddev_over_time"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""stdvar_over_time"", -+ isTestMatrix: true, -+ approximate: true, -+ }, -+ { -+ fn: ""sum_over_time"", -+ isTestMatrix: true, -+ }, -+ { -+ fn: ""timestamp"", -+ }, -+ { -+ fn: ""year"", -+ }, -+ { -+ fn: ""clamp_max"", -+ fArgs: []string{""5""}, -+ }, -+ { -+ fn: ""clamp_min"", -+ fArgs: []string{""5""}, -+ }, -+ { -+ fn: ""predict_linear"", -+ isTestMatrix: true, -+ approximate: true, -+ fArgs: []string{""1""}, -+ }, -+ { -+ fn: ""round"", -+ fArgs: []string{""20""}, -+ }, -+ { -+ fn: ""holt_winters"", -+ isTestMatrix: true, -+ fArgs: []string{""0.5"", ""0.7""}, -+ approximate: true, -+ }, -+ } { -+ -+ t.Run(tc.fn, func(t *testing.T) { -+ baseQuery, err := engine.NewRangeQuery( -+ shardAwareQueryable, -+ mkQuery(tpl, tc.fn, tc.isTestMatrix, tc.fArgs), -+ start, -+ end, -+ step, -+ ) -+ require.Nil(t, err) -+ shardQuery, err := engine.NewRangeQuery( -+ shardAwareQueryable, -+ mkQuery(shardTpl, tc.fn, tc.isTestMatrix, tc.fArgs), -+ start, -+ end, -+ step, -+ ) -+ require.Nil(t, err) -+ baseResult := baseQuery.Exec(ctx) -+ shardResult := shardQuery.Exec(ctx) -+ t.Logf(""base: %+v\n"", baseResult) -+ t.Logf(""shard: %+v\n"", shardResult) -+ if !tc.approximate { -+ require.Equal(t, baseResult, shardResult) -+ } else { -+ // Some functions yield tiny differences when sharded due to combining floating point calculations. 
-+ baseSeries := baseResult.Value.(promql.Matrix)[0] -+ shardSeries := shardResult.Value.(promql.Matrix)[0] -+ -+ require.Equal(t, len(baseSeries.Points), len(shardSeries.Points)) -+ for i, basePt := range baseSeries.Points { -+ shardPt := shardSeries.Points[i] -+ require.Equal(t, basePt.T, shardPt.T) -+ require.Equal( -+ t, -+ math.Round(basePt.V*1e6)/1e6, -+ math.Round(shardPt.V*1e6)/1e6, -+ ) -+ } -+ -+ } -+ }) -+ } -+ -+} -+ -+var shardAwareQueryable = storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { -+ return &testMatrix{ -+ series: []*promql.StorageSeries{ -+ newSeries(labels.Labels{{Name: ""__name__"", Value: ""bar1""}, {Name: ""baz"", Value: ""blip""}, {Name: ""bar"", Value: ""blop""}, {Name: ""foo"", Value: ""barr""}}, factor(5)), -+ newSeries(labels.Labels{{Name: ""__name__"", Value: ""bar1""}, {Name: ""baz"", Value: ""blip""}, {Name: ""bar"", Value: ""blop""}, {Name: ""foo"", Value: ""bazz""}}, factor(7)), -+ newSeries(labels.Labels{{Name: ""__name__"", Value: ""bar1""}, {Name: ""baz"", Value: ""blip""}, {Name: ""bar"", Value: ""blap""}, {Name: ""foo"", Value: ""buzz""}}, factor(12)), -+ newSeries(labels.Labels{{Name: ""__name__"", Value: ""bar1""}, {Name: ""baz"", Value: ""blip""}, {Name: ""bar"", Value: ""blap""}, {Name: ""foo"", Value: ""bozz""}}, factor(11)), -+ newSeries(labels.Labels{{Name: ""__name__"", Value: ""bar1""}, {Name: ""baz"", Value: ""blip""}, {Name: ""bar"", Value: ""blop""}, {Name: ""foo"", Value: ""buzz""}}, factor(8)), -+ newSeries(labels.Labels{{Name: ""__name__"", Value: ""bar1""}, {Name: ""baz"", Value: ""blip""}, {Name: ""bar"", Value: ""blap""}, {Name: ""foo"", Value: ""bazz""}}, identity), -+ }, -+ }, nil -+}) -+ -+type testMatrix struct { -+ series []*promql.StorageSeries -+} -+ -+func (m *testMatrix) Copy() *testMatrix { -+ cpy := *m -+ return &cpy -+} -+ -+func (m testMatrix) Next() bool { return len(m.series) != 0 } -+ -+func (m *testMatrix) At() storage.Series { -+ res := m.series[0] -+ m.series = m.series[1:] -+ return res -+} -+ -+func (m *testMatrix) Err() error { return nil } -+ -+func (m *testMatrix) Warnings() storage.Warnings { return nil } -+ -+func (m *testMatrix) Select(_ bool, selectParams *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { -+ s, _, err := astmapper.ShardFromMatchers(matchers) -+ if err != nil { -+ return storage.ErrSeriesSet(err) -+ } -+ -+ if s != nil { -+ return splitByShard(s.Shard, s.Of, m) -+ } -+ -+ return m.Copy() -+} -+ -+func (m *testMatrix) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { -+ return nil, nil, nil -+} -+func (m *testMatrix) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { -+ return nil, nil, nil -+} -+func (m *testMatrix) Close() error { return nil } -+ -+func newSeries(metric labels.Labels, generator func(float64) float64) *promql.StorageSeries { -+ sort.Sort(metric) -+ var points []promql.Point -+ -+ for ts := start.Add(-step); ts.Unix() <= end.Unix(); ts = ts.Add(step) { -+ t := ts.Unix() * 1e3 -+ points = append(points, promql.Point{ -+ T: t, -+ V: generator(float64(t)), -+ }) -+ } -+ -+ return promql.NewStorageSeries(promql.Series{ -+ Metric: metric, -+ Points: points, -+ }) -+} -+ -+func identity(t float64) float64 { -+ return t -+} -+ -+func factor(f float64) func(float64) float64 { -+ i := 0. 
-+ return func(float64) float64 { -+ i++ -+ res := i * f -+ return res -+ } -+} -+ -+// var identity(t int64) float64 { -+// return float64(t) -+// } -+ -+// splitByShard returns the shard subset of a testMatrix. -+// e.g if a testMatrix has 6 series, and we want 3 shard, then each shard will contain -+// 2 series. -+func splitByShard(shardIndex, shardTotal int, testMatrices *testMatrix) *testMatrix { -+ res := &testMatrix{} -+ for i, s := range testMatrices.series { -+ if i%shardTotal != shardIndex { -+ continue -+ } -+ var points []promql.Point -+ it := s.Iterator() -+ for it.Next() { -+ t, v := it.At() -+ points = append(points, promql.Point{ -+ T: t, -+ V: v, -+ }) -+ -+ } -+ lbs := s.Labels().Copy() -+ lbs = append(lbs, labels.Label{Name: ""__cortex_shard__"", Value: fmt.Sprintf(""%d_of_%d"", shardIndex, shardTotal)}) -+ sort.Sort(lbs) -+ res.series = append(res.series, promql.NewStorageSeries(promql.Series{ -+ Metric: lbs, -+ Points: points, -+ })) -+ } -+ return res -+} -diff --git a/pkg/querier/queryrange/queryrangebase/query_range.go b/pkg/querier/queryrange/queryrangebase/query_range.go -index 7acab4b155c99..c111ac478c8cd 100644 ---- a/pkg/querier/queryrange/queryrangebase/query_range.go -+++ b/pkg/querier/queryrange/queryrangebase/query_range.go -@@ -31,7 +31,8 @@ import ( - const StatusSuccess = ""success"" - - var ( -- json = jsoniter.Config{ -+ matrix = model.ValMatrix.String() -+ json = jsoniter.Config{ - EscapeHTML: false, // No HTML in our responses. - SortMapKeys: true, - ValidateJsonRawMessage: true, -diff --git a/pkg/querier/queryrange/queryrangebase/query_range_test.go b/pkg/querier/queryrange/queryrangebase/query_range_test.go -new file mode 100644 -index 0000000000000..09c55ffd77f9c ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/query_range_test.go -@@ -0,0 +1,342 @@ -+package queryrangebase -+ -+import ( -+ ""bytes"" -+ ""context"" -+ ""io/ioutil"" -+ ""net/http"" -+ ""strconv"" -+ ""testing"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ jsoniter ""github.com/json-iterator/go"" -+ ""github.com/stretchr/testify/assert"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/httpgrpc"" -+ ""github.com/weaveworks/common/user"" -+) -+ -+func TestRequest(t *testing.T) { -+ // Create a Copy parsedRequest to assign the expected headers to the request without affecting other tests using the global. 
-+ // The test below adds a Test-Header header to the request and expects it back once the encode/decode of request is done via PrometheusCodec -+ parsedRequestWithHeaders := *parsedRequest -+ parsedRequestWithHeaders.Headers = reqHeaders -+ for i, tc := range []struct { -+ url string -+ expected Request -+ expectedErr error -+ }{ -+ { -+ url: query, -+ expected: &parsedRequestWithHeaders, -+ }, -+ { -+ url: ""api/v1/query_range?start=foo"", -+ expectedErr: httpgrpc.Errorf(http.StatusBadRequest, ""invalid parameter \""start\""; cannot parse \""foo\"" to a valid timestamp""), -+ }, -+ { -+ url: ""api/v1/query_range?start=123&end=bar"", -+ expectedErr: httpgrpc.Errorf(http.StatusBadRequest, ""invalid parameter \""end\""; cannot parse \""bar\"" to a valid timestamp""), -+ }, -+ { -+ url: ""api/v1/query_range?start=123&end=0"", -+ expectedErr: errEndBeforeStart, -+ }, -+ { -+ url: ""api/v1/query_range?start=123&end=456&step=baz"", -+ expectedErr: httpgrpc.Errorf(http.StatusBadRequest, ""invalid parameter \""step\""; cannot parse \""baz\"" to a valid duration""), -+ }, -+ { -+ url: ""api/v1/query_range?start=123&end=456&step=-1"", -+ expectedErr: errNegativeStep, -+ }, -+ { -+ url: ""api/v1/query_range?start=0&end=11001&step=1"", -+ expectedErr: errStepTooSmall, -+ }, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ r, err := http.NewRequest(""GET"", tc.url, nil) -+ require.NoError(t, err) -+ r.Header.Add(""Test-Header"", ""test"") -+ -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ -+ // Get a deep copy of the request with Context changed to ctx -+ r = r.Clone(ctx) -+ -+ req, err := PrometheusCodec.DecodeRequest(ctx, r, []string{""Test-Header""}) -+ if err != nil { -+ require.EqualValues(t, tc.expectedErr, err) -+ return -+ } -+ require.EqualValues(t, tc.expected, req) -+ -+ rdash, err := PrometheusCodec.EncodeRequest(context.Background(), req) -+ require.NoError(t, err) -+ require.EqualValues(t, tc.url, rdash.RequestURI) -+ }) -+ } -+} -+ -+func TestResponse(t *testing.T) { -+ r := *parsedResponse -+ r.Headers = respHeaders -+ for i, tc := range []struct { -+ body string -+ expected *PrometheusResponse -+ }{ -+ { -+ body: responseBody, -+ expected: &r, -+ }, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ response := &http.Response{ -+ StatusCode: 200, -+ Header: http.Header{""Content-Type"": []string{""application/json""}}, -+ Body: ioutil.NopCloser(bytes.NewBuffer([]byte(tc.body))), -+ } -+ resp, err := PrometheusCodec.DecodeResponse(context.Background(), response, nil) -+ require.NoError(t, err) -+ assert.Equal(t, tc.expected, resp) -+ -+ // Reset response, as the above call will have consumed the body reader. 
-+ response = &http.Response{ -+ StatusCode: 200, -+ Header: http.Header{""Content-Type"": []string{""application/json""}}, -+ Body: ioutil.NopCloser(bytes.NewBuffer([]byte(tc.body))), -+ ContentLength: int64(len(tc.body)), -+ } -+ resp2, err := PrometheusCodec.EncodeResponse(context.Background(), resp) -+ require.NoError(t, err) -+ assert.Equal(t, response, resp2) -+ }) -+ } -+} -+ -+func TestMergeAPIResponses(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ input []Response -+ expected Response -+ }{ -+ { -+ name: ""No responses shouldn't panic and return a non-null result and result type."", -+ input: []Response{}, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{}, -+ }, -+ }, -+ }, -+ -+ { -+ name: ""A single empty response shouldn't panic."", -+ input: []Response{ -+ &PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{}, -+ }, -+ }, -+ }, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{}, -+ }, -+ }, -+ }, -+ -+ { -+ name: ""Multiple empty responses shouldn't panic."", -+ input: []Response{ -+ &PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{}, -+ }, -+ }, -+ &PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{}, -+ }, -+ }, -+ }, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{}, -+ }, -+ }, -+ }, -+ -+ { -+ name: ""Basic merging of two responses."", -+ input: []Response{ -+ &PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{}, -+ Samples: []cortexpb.Sample{ -+ {Value: 0, TimestampMs: 0}, -+ {Value: 1, TimestampMs: 1}, -+ }, -+ }, -+ }, -+ }, -+ }, -+ &PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{}, -+ Samples: []cortexpb.Sample{ -+ {Value: 2, TimestampMs: 2}, -+ {Value: 3, TimestampMs: 3}, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{}, -+ Samples: []cortexpb.Sample{ -+ {Value: 0, TimestampMs: 0}, -+ {Value: 1, TimestampMs: 1}, -+ {Value: 2, TimestampMs: 2}, -+ {Value: 3, TimestampMs: 3}, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ -+ { -+ name: ""Merging of responses when labels are in different order."", -+ input: []Response{ -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""a"":""b"",""c"":""d""},""values"":[[0,""0""],[1,""1""]]}]}}`), -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""c"":""d"",""a"":""b""},""values"":[[2,""2""],[3,""3""]]}]}}`), -+ }, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []cortexpb.Sample{ -+ {Value: 0, TimestampMs: 0}, -+ {Value: 1, TimestampMs: 1000}, -+ {Value: 2, TimestampMs: 2000}, -+ {Value: 3, TimestampMs: 3000}, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ -+ { -+ name: ""Merging of samples where there is single 
overlap."", -+ input: []Response{ -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""a"":""b"",""c"":""d""},""values"":[[1,""1""],[2,""2""]]}]}}`), -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""c"":""d"",""a"":""b""},""values"":[[2,""2""],[3,""3""]]}]}}`), -+ }, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []cortexpb.Sample{ -+ {Value: 1, TimestampMs: 1000}, -+ {Value: 2, TimestampMs: 2000}, -+ {Value: 3, TimestampMs: 3000}, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ { -+ name: ""Merging of samples where there is multiple partial overlaps."", -+ input: []Response{ -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""a"":""b"",""c"":""d""},""values"":[[1,""1""],[2,""2""],[3,""3""]]}]}}`), -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""c"":""d"",""a"":""b""},""values"":[[2,""2""],[3,""3""],[4,""4""],[5,""5""]]}]}}`), -+ }, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []cortexpb.Sample{ -+ {Value: 1, TimestampMs: 1000}, -+ {Value: 2, TimestampMs: 2000}, -+ {Value: 3, TimestampMs: 3000}, -+ {Value: 4, TimestampMs: 4000}, -+ {Value: 5, TimestampMs: 5000}, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ { -+ name: ""Merging of samples where there is complete overlap."", -+ input: []Response{ -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""a"":""b"",""c"":""d""},""values"":[[2,""2""],[3,""3""]]}]}}`), -+ mustParse(t, `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""c"":""d"",""a"":""b""},""values"":[[2,""2""],[3,""3""],[4,""4""],[5,""5""]]}]}}`), -+ }, -+ expected: &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []cortexpb.Sample{ -+ {Value: 2, TimestampMs: 2000}, -+ {Value: 3, TimestampMs: 3000}, -+ {Value: 4, TimestampMs: 4000}, -+ {Value: 5, TimestampMs: 5000}, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }} { -+ t.Run(tc.name, func(t *testing.T) { -+ output, err := PrometheusCodec.MergeResponse(tc.input...) -+ require.NoError(t, err) -+ require.Equal(t, tc.expected, output) -+ }) -+ } -+} -+ -+func mustParse(t *testing.T, response string) Response { -+ var resp PrometheusResponse -+ // Needed as goimports automatically add a json import otherwise. 
-+ json := jsoniter.ConfigCompatibleWithStandardLibrary -+ require.NoError(t, json.Unmarshal([]byte(response), &resp)) -+ return &resp -+} -diff --git a/pkg/querier/queryrange/queryrangebase/queryable_test.go b/pkg/querier/queryrange/queryrangebase/queryable_test.go -new file mode 100644 -index 0000000000000..ed1de014bfc63 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/queryable_test.go -@@ -0,0 +1,270 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""testing"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/pkg/errors"" -+ ""github.com/prometheus/prometheus/model/labels"" -+ ""github.com/prometheus/prometheus/promql/parser"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/querier/astmapper"" -+) -+ -+func TestSelect(t *testing.T) { -+ var testExpr = []struct { -+ name string -+ querier *ShardedQuerier -+ fn func(*testing.T, *ShardedQuerier) -+ }{ -+ { -+ name: ""errors non embedded query"", -+ querier: mkQuerier( -+ nil, -+ ), -+ fn: func(t *testing.T, q *ShardedQuerier) { -+ set := q.Select(false, nil) -+ require.EqualError(t, set.Err(), nonEmbeddedErrMsg) -+ }, -+ }, -+ { -+ name: ""replaces query"", -+ querier: mkQuerier(mockHandlerWith( -+ &PrometheusResponse{}, -+ nil, -+ )), -+ fn: func(t *testing.T, q *ShardedQuerier) { -+ -+ expected := &PrometheusResponse{ -+ Status: ""success"", -+ Data: PrometheusData{ -+ ResultType: string(parser.ValueTypeVector), -+ }, -+ } -+ -+ // override handler func to assert new query has been substituted -+ q.Handler = HandlerFunc( -+ func(ctx context.Context, req Request) (Response, error) { -+ require.Equal(t, `http_requests_total{cluster=""prod""}`, req.GetQuery()) -+ return expected, nil -+ }, -+ ) -+ -+ encoded, err := astmapper.JSONCodec.Encode([]string{`http_requests_total{cluster=""prod""}`}) -+ require.Nil(t, err) -+ set := q.Select( -+ false, -+ nil, -+ exactMatch(""__name__"", astmapper.EmbeddedQueriesMetricName), -+ exactMatch(astmapper.QueryLabel, encoded), -+ ) -+ require.Nil(t, set.Err()) -+ }, -+ }, -+ { -+ name: ""propagates response error"", -+ querier: mkQuerier(mockHandlerWith( -+ &PrometheusResponse{ -+ Error: ""SomeErr"", -+ }, -+ nil, -+ )), -+ fn: func(t *testing.T, q *ShardedQuerier) { -+ encoded, err := astmapper.JSONCodec.Encode([]string{`http_requests_total{cluster=""prod""}`}) -+ require.Nil(t, err) -+ set := q.Select( -+ false, -+ nil, -+ exactMatch(""__name__"", astmapper.EmbeddedQueriesMetricName), -+ exactMatch(astmapper.QueryLabel, encoded), -+ ) -+ require.EqualError(t, set.Err(), ""SomeErr"") -+ }, -+ }, -+ { -+ name: ""returns SeriesSet"", -+ querier: mkQuerier(mockHandlerWith( -+ &PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: string(parser.ValueTypeVector), -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 1, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 2, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 8, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 9, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ nil, -+ )), -+ fn: func(t *testing.T, q *ShardedQuerier) { -+ encoded, err := astmapper.JSONCodec.Encode([]string{`http_requests_total{cluster=""prod""}`}) -+ require.Nil(t, err) -+ set := q.Select( -+ false, -+ nil, -+ exactMatch(""__name__"", 
astmapper.EmbeddedQueriesMetricName), -+ exactMatch(astmapper.QueryLabel, encoded), -+ ) -+ require.Nil(t, set.Err()) -+ require.Equal( -+ t, -+ NewSeriesSet([]SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 1, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 2, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 8, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 9, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ }), -+ set, -+ ) -+ }, -+ }, -+ } -+ -+ for _, c := range testExpr { -+ t.Run(c.name, func(t *testing.T) { -+ c.fn(t, c.querier) -+ }) -+ } -+} -+ -+func TestSelectConcurrent(t *testing.T) { -+ for _, c := range []struct { -+ name string -+ queries []string -+ err error -+ }{ -+ { -+ name: ""concats queries"", -+ queries: []string{ -+ `sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=""0_of_3"",baz=""blip""}[1m]))`, -+ `sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=""1_of_3"",baz=""blip""}[1m]))`, -+ `sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=""2_of_3"",baz=""blip""}[1m]))`, -+ }, -+ err: nil, -+ }, -+ { -+ name: ""errors"", -+ queries: []string{ -+ `sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=""0_of_3"",baz=""blip""}[1m]))`, -+ `sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=""1_of_3"",baz=""blip""}[1m]))`, -+ `sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=""2_of_3"",baz=""blip""}[1m]))`, -+ }, -+ err: errors.Errorf(""some-err""), -+ }, -+ } { -+ -+ t.Run(c.name, func(t *testing.T) { -+ // each request will return a single samplestream -+ querier := mkQuerier(mockHandlerWith(&PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: string(parser.ValueTypeVector), -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 1, -+ TimestampMs: 1, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, c.err)) -+ -+ encoded, err := astmapper.JSONCodec.Encode(c.queries) -+ require.Nil(t, err) -+ set := querier.Select( -+ false, -+ nil, -+ exactMatch(""__name__"", astmapper.EmbeddedQueriesMetricName), -+ exactMatch(astmapper.QueryLabel, encoded), -+ ) -+ -+ if c.err != nil { -+ require.EqualError(t, set.Err(), c.err.Error()) -+ return -+ } -+ -+ var ct int -+ for set.Next() { -+ ct++ -+ } -+ require.Equal(t, len(c.queries), ct) -+ }) -+ } -+} -+ -+func exactMatch(k, v string) *labels.Matcher { -+ m, err := labels.NewMatcher(labels.MatchEqual, k, v) -+ if err != nil { -+ panic(err) -+ } -+ return m -+ -+} -+ -+func mkQuerier(handler Handler) *ShardedQuerier { -+ return &ShardedQuerier{Ctx: context.Background(), Req: &PrometheusRequest{}, Handler: handler, ResponseHeaders: map[string][]string{}} -+} -diff --git a/pkg/querier/queryrange/queryrangebase/querysharding_test.go b/pkg/querier/queryrange/queryrangebase/querysharding_test.go -new file mode 100644 -index 0000000000000..b32a8351c79e8 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/querysharding_test.go -@@ -0,0 +1,664 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""fmt"" -+ ""math"" -+ ""runtime"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/cortexproject/cortex/pkg/util"" -+ ""github.com/go-kit/log"" -+ ""github.com/pkg/errors"" -+ ""github.com/prometheus/common/model"" -+ 
""github.com/prometheus/prometheus/promql"" -+ ""github.com/prometheus/prometheus/promql/parser"" -+ ""github.com/prometheus/prometheus/storage"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/storage/chunk"" -+) -+ -+func TestQueryshardingMiddleware(t *testing.T) { -+ var testExpr = []struct { -+ name string -+ next Handler -+ input Request -+ ctx context.Context -+ expected *PrometheusResponse -+ err bool -+ override func(*testing.T, Handler) -+ }{ -+ { -+ name: ""invalid query error"", -+ // if the query parses correctly force it to succeed -+ next: mockHandlerWith(&PrometheusResponse{ -+ Status: """", -+ Data: PrometheusData{ -+ ResultType: string(parser.ValueTypeVector), -+ Result: []SampleStream{}, -+ }, -+ ErrorType: """", -+ Error: """", -+ }, nil), -+ input: &PrometheusRequest{Query: ""^GARBAGE""}, -+ ctx: context.Background(), -+ expected: nil, -+ err: true, -+ }, -+ { -+ name: ""downstream err"", -+ next: mockHandlerWith(nil, errors.Errorf(""some err"")), -+ input: defaultReq(), -+ ctx: context.Background(), -+ expected: nil, -+ err: true, -+ }, -+ { -+ name: ""successful trip"", -+ next: mockHandlerWith(sampleMatrixResponse(), nil), -+ override: func(t *testing.T, handler Handler) { -+ -+ // pre-encode the query so it doesn't try to re-split. We're just testing if it passes through correctly -+ qry := defaultReq().WithQuery( -+ `__embedded_queries__{__cortex_queries__=""{\""Concat\"":[\""http_requests_total{cluster=\\\""prod\\\""}\""]}""}`, -+ ) -+ out, err := handler.Do(context.Background(), qry) -+ require.Nil(t, err) -+ require.Equal(t, string(parser.ValueTypeMatrix), out.(*PrometheusResponse).Data.ResultType) -+ require.Equal(t, sampleMatrixResponse(), out) -+ }, -+ }, -+ } -+ -+ for _, c := range testExpr { -+ t.Run(c.name, func(t *testing.T) { -+ engine := promql.NewEngine(promql.EngineOpts{ -+ Logger: log.NewNopLogger(), -+ Reg: nil, -+ MaxSamples: 1000, -+ Timeout: time.Minute, -+ }) -+ -+ handler := NewQueryShardMiddleware( -+ log.NewNopLogger(), -+ engine, -+ ShardingConfigs{ -+ { -+ RowShards: 3, -+ }, -+ }, -+ PrometheusCodec, -+ 0, -+ nil, -+ nil, -+ ).Wrap(c.next) -+ -+ // escape hatch for custom tests -+ if c.override != nil { -+ c.override(t, handler) -+ return -+ } -+ -+ out, err := handler.Do(c.ctx, c.input) -+ -+ if c.err { -+ require.NotNil(t, err) -+ } else { -+ require.Nil(t, err) -+ require.Equal(t, c.expected, out) -+ } -+ -+ }) -+ } -+} -+ -+func sampleMatrixResponse() *PrometheusResponse { -+ return &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: string(parser.ValueTypeMatrix), -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ TimestampMs: 5, -+ Value: 1, -+ }, -+ { -+ TimestampMs: 10, -+ Value: 2, -+ }, -+ }, -+ }, -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ TimestampMs: 5, -+ Value: 8, -+ }, -+ { -+ TimestampMs: 10, -+ Value: 9, -+ }, -+ }, -+ }, -+ }, -+ }, -+ } -+} -+ -+func mockHandlerWith(resp *PrometheusResponse, err error) Handler { -+ return HandlerFunc(func(ctx context.Context, req Request) (Response, error) { -+ if expired := ctx.Err(); expired != nil { -+ return nil, expired -+ } -+ -+ return resp, err -+ }) -+} -+ -+func defaultReq() *PrometheusRequest { -+ return &PrometheusRequest{ -+ Path: ""/query_range"", -+ Start: 00, -+ End: 10, 
-+ Step: 5, -+ Timeout: time.Minute, -+ Query: `sum(rate(http_requests_total{}[5m]))`, -+ } -+} -+ -+func TestShardingConfigs_ValidRange(t *testing.T) { -+ reqWith := func(start, end string) *PrometheusRequest { -+ r := defaultReq() -+ -+ if start != """" { -+ r.Start = int64(parseDate(start)) -+ } -+ -+ if end != """" { -+ r.End = int64(parseDate(end)) -+ } -+ -+ return r -+ } -+ -+ var testExpr = []struct { -+ name string -+ confs ShardingConfigs -+ req *PrometheusRequest -+ expected chunk.PeriodConfig -+ err error -+ }{ -+ { -+ name: ""0 ln configs fail"", -+ confs: ShardingConfigs{}, -+ req: defaultReq(), -+ err: errInvalidShardingRange, -+ }, -+ { -+ name: ""request starts before beginning config"", -+ confs: ShardingConfigs{ -+ { -+ From: chunk.DayTime{Time: parseDate(""2019-10-16"")}, -+ RowShards: 1, -+ }, -+ }, -+ req: reqWith(""2019-10-15"", """"), -+ err: errInvalidShardingRange, -+ }, -+ { -+ name: ""request spans multiple configs"", -+ confs: ShardingConfigs{ -+ { -+ From: chunk.DayTime{Time: parseDate(""2019-10-16"")}, -+ RowShards: 1, -+ }, -+ { -+ From: chunk.DayTime{Time: parseDate(""2019-11-16"")}, -+ RowShards: 2, -+ }, -+ }, -+ req: reqWith(""2019-10-15"", ""2019-11-17""), -+ err: errInvalidShardingRange, -+ }, -+ { -+ name: ""selects correct config "", -+ confs: ShardingConfigs{ -+ { -+ From: chunk.DayTime{Time: parseDate(""2019-10-16"")}, -+ RowShards: 1, -+ }, -+ { -+ From: chunk.DayTime{Time: parseDate(""2019-11-16"")}, -+ RowShards: 2, -+ }, -+ { -+ From: chunk.DayTime{Time: parseDate(""2019-12-16"")}, -+ RowShards: 3, -+ }, -+ }, -+ req: reqWith(""2019-11-20"", ""2019-11-25""), -+ expected: chunk.PeriodConfig{ -+ From: chunk.DayTime{Time: parseDate(""2019-11-16"")}, -+ RowShards: 2, -+ }, -+ }, -+ } -+ -+ for _, c := range testExpr { -+ t.Run(c.name, func(t *testing.T) { -+ out, err := c.confs.ValidRange(c.req.Start, c.req.End) -+ -+ if c.err != nil { -+ require.EqualError(t, err, c.err.Error()) -+ } else { -+ require.Nil(t, err) -+ require.Equal(t, c.expected, out) -+ } -+ }) -+ } -+} -+ -+func parseDate(in string) model.Time { -+ t, err := time.Parse(""2006-01-02"", in) -+ if err != nil { -+ panic(err) -+ } -+ return model.Time(t.UnixNano()) -+} -+ -+// mappingValidator can be injected into a middleware chain to assert that a query matches an expected query -+type mappingValidator struct { -+ expected string -+ next Handler -+} -+ -+func (v *mappingValidator) Do(ctx context.Context, req Request) (Response, error) { -+ expr, err := parser.ParseExpr(req.GetQuery()) -+ if err != nil { -+ return nil, err -+ } -+ -+ if v.expected != expr.String() { -+ return nil, errors.Errorf(""bad query mapping: expected [%s], got [%s]"", v.expected, expr.String()) -+ } -+ -+ return v.next.Do(ctx, req) -+} -+ -+// approximatelyEquals ensures two responses are approximately equal, up to 6 decimals precision per sample -+func approximatelyEquals(t *testing.T, a, b *PrometheusResponse) { -+ require.Equal(t, a.Status, b.Status) -+ if a.Status != StatusSuccess { -+ return -+ } -+ as, err := ResponseToSamples(a) -+ require.Nil(t, err) -+ bs, err := ResponseToSamples(b) -+ require.Nil(t, err) -+ -+ require.Equal(t, len(as), len(bs)) -+ -+ for i := 0; i < len(as); i++ { -+ a := as[i] -+ b := bs[i] -+ require.Equal(t, a.Labels, b.Labels) -+ require.Equal(t, len(a.Samples), len(b.Samples)) -+ -+ for j := 0; j < len(a.Samples); j++ { -+ aSample := &a.Samples[j] -+ aSample.Value = math.Round(aSample.Value*1e6) / 1e6 -+ bSample := &b.Samples[j] -+ bSample.Value = math.Round(bSample.Value*1e6) / 
1e6 -+ } -+ require.Equal(t, a, b) -+ } -+} -+ -+func TestQueryshardingCorrectness(t *testing.T) { -+ shardFactor := 2 -+ req := &PrometheusRequest{ -+ Path: ""/query_range"", -+ Start: util.TimeToMillis(start), -+ End: util.TimeToMillis(end), -+ Step: int64(step) / int64(time.Second), -+ } -+ for _, tc := range []struct { -+ desc string -+ query string -+ mapped string -+ }{ -+ { -+ desc: ""fully encoded histogram_quantile"", -+ query: `histogram_quantile(0.5, rate(bar1{baz=""blip""}[30s]))`, -+ mapped: `__embedded_queries__{__cortex_queries__=""{\""Concat\"":[\""histogram_quantile(0.5, rate(bar1{baz=\\\""blip\\\""}[30s]))\""]}""}`, -+ }, -+ { -+ desc: ""entire query with shard summer"", -+ query: `sum by (foo,bar) (min_over_time(bar1{baz=""blip""}[1m]))`, -+ mapped: `sum by(foo, bar) (__embedded_queries__{__cortex_queries__=""{\""Concat\"":[\""sum by(foo, bar, __cortex_shard__) (min_over_time(bar1{__cortex_shard__=\\\""0_of_2\\\"",baz=\\\""blip\\\""}[1m]))\"",\""sum by(foo, bar, __cortex_shard__) (min_over_time(bar1{__cortex_shard__=\\\""1_of_2\\\"",baz=\\\""blip\\\""}[1m]))\""]}""})`, -+ }, -+ { -+ desc: ""shard one leg encode the other"", -+ query: ""sum(rate(bar1[1m])) or rate(bar1[1m])"", -+ mapped: `sum without(__cortex_shard__) (__embedded_queries__{__cortex_queries__=""{\""Concat\"":[\""sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=\\\""0_of_2\\\""}[1m]))\"",\""sum by(__cortex_shard__) (rate(bar1{__cortex_shard__=\\\""1_of_2\\\""}[1m]))\""]}""}) or __embedded_queries__{__cortex_queries__=""{\""Concat\"":[\""rate(bar1[1m])\""]}""}`, -+ }, -+ { -+ desc: ""should skip encoding leaf scalar/strings"", -+ query: `histogram_quantile(0.5, sum(rate(cortex_cache_value_size_bytes_bucket[5m])) by (le))`, -+ mapped: `histogram_quantile(0.5, sum by(le) (__embedded_queries__{__cortex_queries__=""{\""Concat\"":[\""sum by(le, __cortex_shard__) (rate(cortex_cache_value_size_bytes_bucket{__cortex_shard__=\\\""0_of_2\\\""}[5m]))\"",\""sum by(le, __cortex_shard__) (rate(cortex_cache_value_size_bytes_bucket{__cortex_shard__=\\\""1_of_2\\\""}[5m]))\""]}""}))`, -+ }, -+ { -+ desc: ""ensure sharding sub aggregations are skipped to avoid non-associative series merging across shards"", -+ query: `sum( -+ count( -+ count( -+ bar1 -+ ) by (drive,instance) -+ ) by (instance) -+ )`, -+ mapped: `__embedded_queries__{__cortex_queries__=""{\""Concat\"":[\""sum(count by(instance) (count by(drive, instance) (bar1)))\""]}""}`, -+ }, -+ } { -+ t.Run(tc.desc, func(t *testing.T) { -+ shardingConf := ShardingConfigs{ -+ chunk.PeriodConfig{ -+ Schema: ""v10"", -+ RowShards: uint32(shardFactor), -+ }, -+ } -+ shardingware := NewQueryShardMiddleware( -+ log.NewNopLogger(), -+ engine, -+ // ensure that all requests are shard compatbile -+ shardingConf, -+ PrometheusCodec, -+ 0, -+ nil, -+ nil, -+ ) -+ -+ downstream := &downstreamHandler{ -+ engine: engine, -+ queryable: shardAwareQueryable, -+ } -+ -+ assertionMWare := MiddlewareFunc(func(next Handler) Handler { -+ return &mappingValidator{ -+ expected: tc.mapped, -+ next: next, -+ } -+ }) -+ -+ mapperware := MiddlewareFunc(func(next Handler) Handler { -+ return newASTMapperware(shardingConf, next, log.NewNopLogger(), nil) -+ }) -+ -+ r := req.WithQuery(tc.query) -+ -+ // ensure the expected ast mapping occurs -+ _, err := MergeMiddlewares(mapperware, assertionMWare).Wrap(downstream).Do(context.Background(), r) -+ require.Nil(t, err) -+ -+ shardedRes, err := shardingware.Wrap(downstream).Do(context.Background(), r) -+ require.Nil(t, err) -+ -+ res, err := 
downstream.Do(context.Background(), r) -+ require.Nil(t, err) -+ -+ approximatelyEquals(t, res.(*PrometheusResponse), shardedRes.(*PrometheusResponse)) -+ }) -+ } -+} -+ -+func TestShardSplitting(t *testing.T) { -+ -+ for _, tc := range []struct { -+ desc string -+ lookback time.Duration -+ shouldShard bool -+ }{ -+ { -+ desc: ""older than lookback"", -+ lookback: -1, // a negative lookback will ensure the entire query doesn't cross the sharding boundary & can safely be sharded. -+ shouldShard: true, -+ }, -+ { -+ desc: ""overlaps lookback"", -+ lookback: end.Sub(start) / 2, // intersect the request causing it to avoid sharding -+ shouldShard: false, -+ }, -+ { -+ desc: ""newer than lookback"", -+ lookback: end.Sub(start) + 1, -+ shouldShard: false, -+ }, -+ } { -+ t.Run(tc.desc, func(t *testing.T) { -+ req := &PrometheusRequest{ -+ Path: ""/query_range"", -+ Start: util.TimeToMillis(start), -+ End: util.TimeToMillis(end), -+ Step: int64(step) / int64(time.Second), -+ Query: ""sum(rate(bar1[1m]))"", -+ } -+ -+ shardingware := NewQueryShardMiddleware( -+ log.NewNopLogger(), -+ engine, -+ // ensure that all requests are shard compatbile -+ ShardingConfigs{ -+ chunk.PeriodConfig{ -+ Schema: ""v10"", -+ RowShards: uint32(2), -+ }, -+ }, -+ PrometheusCodec, -+ tc.lookback, -+ nil, -+ nil, -+ ) -+ -+ downstream := &downstreamHandler{ -+ engine: engine, -+ queryable: shardAwareQueryable, -+ } -+ -+ handler := shardingware.Wrap(downstream).(*shardSplitter) -+ handler.now = func() time.Time { return end } // make the split cut the request in half (don't use time.Now) -+ -+ var didShard bool -+ -+ old := handler.shardingware -+ handler.shardingware = HandlerFunc(func(ctx context.Context, req Request) (Response, error) { -+ didShard = true -+ return old.Do(ctx, req) -+ }) -+ -+ resp, err := handler.Do(context.Background(), req) -+ require.Nil(t, err) -+ -+ require.Equal(t, tc.shouldShard, didShard) -+ -+ unaltered, err := downstream.Do(context.Background(), req) -+ require.Nil(t, err) -+ -+ approximatelyEquals(t, unaltered.(*PrometheusResponse), resp.(*PrometheusResponse)) -+ -+ }) -+ } -+ -+} -+ -+func BenchmarkQuerySharding(b *testing.B) { -+ -+ var shards []uint32 -+ -+ // max out at half available cpu cores in order to minimize noisy neighbor issues while benchmarking -+ for shard := 1; shard <= runtime.NumCPU()/2; shard = shard * 2 { -+ shards = append(shards, uint32(shard)) -+ } -+ -+ for _, tc := range []struct { -+ labelBuckets int -+ labels []string -+ samplesPerSeries int -+ query string -+ desc string -+ }{ -+ // Ensure you have enough cores to run these tests without blocking. 
-+ // We want to simulate parallel computations and waiting in queue doesn't help -+ -+ // no-group -+ { -+ labelBuckets: 16, -+ labels: []string{""a"", ""b"", ""c""}, -+ samplesPerSeries: 100, -+ query: `sum(rate(http_requests_total[5m]))`, -+ desc: ""sum nogroup"", -+ }, -+ // sum by -+ { -+ labelBuckets: 16, -+ labels: []string{""a"", ""b"", ""c""}, -+ samplesPerSeries: 100, -+ query: `sum by(a) (rate(http_requests_total[5m]))`, -+ desc: ""sum by"", -+ }, -+ // sum without -+ { -+ labelBuckets: 16, -+ labels: []string{""a"", ""b"", ""c""}, -+ samplesPerSeries: 100, -+ query: `sum without (a) (rate(http_requests_total[5m]))`, -+ desc: ""sum without"", -+ }, -+ } { -+ for _, delayPerSeries := range []time.Duration{ -+ 0, -+ time.Millisecond / 10, -+ } { -+ engine := promql.NewEngine(promql.EngineOpts{ -+ Logger: log.NewNopLogger(), -+ Reg: nil, -+ MaxSamples: 100000000, -+ Timeout: time.Minute, -+ }) -+ -+ queryable := NewMockShardedQueryable( -+ tc.samplesPerSeries, -+ tc.labels, -+ tc.labelBuckets, -+ delayPerSeries, -+ ) -+ downstream := &downstreamHandler{ -+ engine: engine, -+ queryable: queryable, -+ } -+ -+ var ( -+ start int64 -+ end = int64(1000 * tc.samplesPerSeries) -+ step = (end - start) / 1000 -+ ) -+ -+ req := &PrometheusRequest{ -+ Path: ""/query_range"", -+ Start: start, -+ End: end, -+ Step: step, -+ Timeout: time.Minute, -+ Query: tc.query, -+ } -+ -+ for _, shardFactor := range shards { -+ shardingware := NewQueryShardMiddleware( -+ log.NewNopLogger(), -+ engine, -+ // ensure that all requests are shard compatbile -+ ShardingConfigs{ -+ chunk.PeriodConfig{ -+ Schema: ""v10"", -+ RowShards: shardFactor, -+ }, -+ }, -+ PrometheusCodec, -+ 0, -+ nil, -+ nil, -+ ).Wrap(downstream) -+ -+ b.Run( -+ fmt.Sprintf( -+ ""desc:[%s]---shards:[%d]---series:[%.0f]---delayPerSeries:[%s]---samplesPerSeries:[%d]"", -+ tc.desc, -+ shardFactor, -+ math.Pow(float64(tc.labelBuckets), float64(len(tc.labels))), -+ delayPerSeries, -+ tc.samplesPerSeries, -+ ), -+ func(b *testing.B) { -+ for n := 0; n < b.N; n++ { -+ _, err := shardingware.Do( -+ context.Background(), -+ req, -+ ) -+ if err != nil { -+ b.Fatal(err.Error()) -+ } -+ } -+ }, -+ ) -+ } -+ fmt.Println() -+ } -+ -+ fmt.Print(""--------------------------------\n\n"") -+ } -+} -+ -+type downstreamHandler struct { -+ engine *promql.Engine -+ queryable storage.Queryable -+} -+ -+func (h *downstreamHandler) Do(ctx context.Context, r Request) (Response, error) { -+ qry, err := h.engine.NewRangeQuery( -+ h.queryable, -+ r.GetQuery(), -+ util.TimeFromMillis(r.GetStart()), -+ util.TimeFromMillis(r.GetEnd()), -+ time.Duration(r.GetStep())*time.Millisecond, -+ ) -+ -+ if err != nil { -+ return nil, err -+ } -+ -+ res := qry.Exec(ctx) -+ extracted, err := FromResult(res) -+ if err != nil { -+ return nil, err -+ -+ } -+ -+ return &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: string(res.Value.Type()), -+ Result: extracted, -+ }, -+ }, nil -+} -diff --git a/pkg/querier/queryrange/queryrangebase/results_cache_test.go b/pkg/querier/queryrange/queryrangebase/results_cache_test.go -new file mode 100644 -index 0000000000000..75756b3b06d4e ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/results_cache_test.go -@@ -0,0 +1,1040 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""fmt"" -+ ""strconv"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/go-kit/log"" -+ ""github.com/gogo/protobuf/types"" -+ ""github.com/grafana/dskit/flagext"" -+ 
""github.com/prometheus/common/model"" -+ ""github.com/stretchr/testify/assert"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/user"" -+ -+ ""github.com/grafana/loki/pkg/storage/chunk/cache"" -+) -+ -+const ( -+ query = ""/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&step=120"" -+ responseBody = `{""status"":""success"",""data"":{""resultType"":""matrix"",""result"":[{""metric"":{""foo"":""bar""},""values"":[[1536673680,""137""],[1536673780,""137""]]}]}}` -+) -+ -+var ( -+ parsedRequest = &PrometheusRequest{ -+ Path: ""/api/v1/query_range"", -+ Start: 1536673680 * 1e3, -+ End: 1536716898 * 1e3, -+ Step: 120 * 1e3, -+ Query: ""sum(container_memory_rss) by (namespace)"", -+ } -+ reqHeaders = []*PrometheusRequestHeader{ -+ { -+ Name: ""Test-Header"", -+ Values: []string{""test""}, -+ }, -+ } -+ noCacheRequest = &PrometheusRequest{ -+ Path: ""/api/v1/query_range"", -+ Start: 1536673680 * 1e3, -+ End: 1536716898 * 1e3, -+ Step: 120 * 1e3, -+ Query: ""sum(container_memory_rss) by (namespace)"", -+ CachingOptions: CachingOptions{Disabled: true}, -+ } -+ respHeaders = []*PrometheusResponseHeader{ -+ { -+ Name: ""Content-Type"", -+ Values: []string{""application/json""}, -+ }, -+ } -+ parsedResponse = &PrometheusResponse{ -+ Status: ""success"", -+ Data: PrometheusData{ -+ ResultType: model.ValMatrix.String(), -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""foo"", Value: ""bar""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ {Value: 137, TimestampMs: 1536673680000}, -+ {Value: 137, TimestampMs: 1536673780000}, -+ }, -+ }, -+ }, -+ }, -+ } -+) -+ -+func mkAPIResponse(start, end, step int64) *PrometheusResponse { -+ var samples []cortexpb.Sample -+ for i := start; i <= end; i += step { -+ samples = append(samples, cortexpb.Sample{ -+ TimestampMs: i, -+ Value: float64(i), -+ }) -+ } -+ -+ return &PrometheusResponse{ -+ Status: StatusSuccess, -+ Data: PrometheusData{ -+ ResultType: matrix, -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""foo"", Value: ""bar""}, -+ }, -+ Samples: samples, -+ }, -+ }, -+ }, -+ } -+} -+ -+func mkExtent(start, end int64) Extent { -+ return mkExtentWithStep(start, end, 10) -+} -+ -+func mkExtentWithStep(start, end, step int64) Extent { -+ res := mkAPIResponse(start, end, step) -+ any, err := types.MarshalAny(res) -+ if err != nil { -+ panic(err) -+ } -+ return Extent{ -+ Start: start, -+ End: end, -+ Response: any, -+ } -+} -+ -+func TestShouldCache(t *testing.T) { -+ maxCacheTime := int64(150 * 1000) -+ c := &resultsCache{logger: log.NewNopLogger(), cacheGenNumberLoader: newMockCacheGenNumberLoader()} -+ for _, tc := range []struct { -+ name string -+ request Request -+ input Response -+ cacheGenNumberToInject string -+ expected bool -+ }{ -+ // Tests only for cacheControlHeader -+ { -+ name: ""does not contain the cacheControl header"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: ""meaninglessheader"", -+ Values: []string{}, -+ }, -+ }, -+ }), -+ expected: true, -+ }, -+ { -+ name: ""does contain the cacheControl header which has the value"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: cacheControlHeader, -+ Values: []string{noStoreValue}, -+ }, -+ }, -+ }), -+ expected: false, -+ }, -+ { -+ name: ""cacheControl header 
contains extra values but still good"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: cacheControlHeader, -+ Values: []string{""foo"", noStoreValue}, -+ }, -+ }, -+ }), -+ expected: false, -+ }, -+ { -+ name: ""broken response"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{}), -+ expected: true, -+ }, -+ { -+ name: ""nil headers"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{nil}, -+ }), -+ expected: true, -+ }, -+ { -+ name: ""had cacheControl header but no values"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{{Name: cacheControlHeader}}, -+ }), -+ expected: true, -+ }, -+ -+ // Tests only for cacheGenNumber header -+ { -+ name: ""cacheGenNumber not set in both header and store"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: ""meaninglessheader"", -+ Values: []string{}, -+ }, -+ }, -+ }), -+ expected: true, -+ }, -+ { -+ name: ""cacheGenNumber set in store but not in header"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: ""meaninglessheader"", -+ Values: []string{}, -+ }, -+ }, -+ }), -+ cacheGenNumberToInject: ""1"", -+ expected: false, -+ }, -+ { -+ name: ""cacheGenNumber set in header but not in store"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: ResultsCacheGenNumberHeaderName, -+ Values: []string{""1""}, -+ }, -+ }, -+ }), -+ expected: false, -+ }, -+ { -+ name: ""cacheGenNumber in header and store are the same"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: ResultsCacheGenNumberHeaderName, -+ Values: []string{""1"", ""1""}, -+ }, -+ }, -+ }), -+ cacheGenNumberToInject: ""1"", -+ expected: true, -+ }, -+ { -+ name: ""inconsistency between cacheGenNumber in header and store"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: ResultsCacheGenNumberHeaderName, -+ Values: []string{""1"", ""2""}, -+ }, -+ }, -+ }), -+ cacheGenNumberToInject: ""1"", -+ expected: false, -+ }, -+ { -+ name: ""cacheControl header says not to catch and cacheGenNumbers in store and headers have consistency"", -+ request: &PrometheusRequest{Query: ""metric""}, -+ input: Response(&PrometheusResponse{ -+ Headers: []*PrometheusResponseHeader{ -+ { -+ Name: cacheControlHeader, -+ Values: []string{noStoreValue}, -+ }, -+ { -+ Name: ResultsCacheGenNumberHeaderName, -+ Values: []string{""1"", ""1""}, -+ }, -+ }, -+ }), -+ cacheGenNumberToInject: ""1"", -+ expected: false, -+ }, -+ // @ modifier on vector selectors. 
-+ { -+ name: ""@ modifier on vector selector, before end, before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""metric @ 123"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: true, -+ }, -+ { -+ name: ""@ modifier on vector selector, after end, before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""metric @ 127"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on vector selector, before end, after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""metric @ 151"", End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on vector selector, after end, after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""metric @ 151"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on vector selector with start() before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""metric @ start()"", Start: 100000, End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: true, -+ }, -+ { -+ name: ""@ modifier on vector selector with end() after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""metric @ end()"", Start: 100000, End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ // @ modifier on matrix selectors. -+ { -+ name: ""@ modifier on matrix selector, before end, before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""rate(metric[5m] @ 123)"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: true, -+ }, -+ { -+ name: ""@ modifier on matrix selector, after end, before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""rate(metric[5m] @ 127)"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on matrix selector, before end, after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""rate(metric[5m] @ 151)"", End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on matrix selector, after end, after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""rate(metric[5m] @ 151)"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on matrix selector with start() before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""rate(metric[5m] @ start())"", Start: 100000, End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: true, -+ }, -+ { -+ name: ""@ modifier on matrix selector with end() after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""rate(metric[5m] @ end())"", Start: 100000, End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ // @ modifier on subqueries. 
-+ { -+ name: ""@ modifier on subqueries, before end, before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""sum_over_time(rate(metric[1m])[10m:1m] @ 123)"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: true, -+ }, -+ { -+ name: ""@ modifier on subqueries, after end, before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""sum_over_time(rate(metric[1m])[10m:1m] @ 127)"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on subqueries, before end, after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""sum_over_time(rate(metric[1m])[10m:1m] @ 151)"", End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on subqueries, after end, after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""sum_over_time(rate(metric[1m])[10m:1m] @ 151)"", End: 125000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ { -+ name: ""@ modifier on subqueries with start() before maxCacheTime"", -+ request: &PrometheusRequest{Query: ""sum_over_time(rate(metric[1m])[10m:1m] @ start())"", Start: 100000, End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: true, -+ }, -+ { -+ name: ""@ modifier on subqueries with end() after maxCacheTime"", -+ request: &PrometheusRequest{Query: ""sum_over_time(rate(metric[1m])[10m:1m] @ end())"", Start: 100000, End: 200000}, -+ input: Response(&PrometheusResponse{}), -+ expected: false, -+ }, -+ } { -+ { -+ t.Run(tc.name, func(t *testing.T) { -+ ctx := cache.InjectCacheGenNumber(context.Background(), tc.cacheGenNumberToInject) -+ ret := c.shouldCacheResponse(ctx, tc.request, tc.input, maxCacheTime) -+ require.Equal(t, tc.expected, ret) -+ }) -+ } -+ } -+} -+ -+func TestPartition(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ input Request -+ prevCachedResponse []Extent -+ expectedRequests []Request -+ expectedCachedResponse []Response -+ }{ -+ { -+ name: ""Test a complete hit."", -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 100, -+ }, -+ prevCachedResponse: []Extent{ -+ mkExtent(0, 100), -+ }, -+ expectedCachedResponse: []Response{ -+ mkAPIResponse(0, 100, 10), -+ }, -+ }, -+ -+ { -+ name: ""Test with a complete miss."", -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 100, -+ }, -+ prevCachedResponse: []Extent{ -+ mkExtent(110, 210), -+ }, -+ expectedRequests: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: 100, -+ }}, -+ }, -+ { -+ name: ""Test a partial hit."", -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 100, -+ }, -+ prevCachedResponse: []Extent{ -+ mkExtent(50, 100), -+ }, -+ expectedRequests: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: 50, -+ }, -+ }, -+ expectedCachedResponse: []Response{ -+ mkAPIResponse(50, 100, 10), -+ }, -+ }, -+ { -+ name: ""Test multiple partial hits."", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 200, -+ }, -+ prevCachedResponse: []Extent{ -+ mkExtent(50, 120), -+ mkExtent(160, 250), -+ }, -+ expectedRequests: []Request{ -+ &PrometheusRequest{ -+ Start: 120, -+ End: 160, -+ }, -+ }, -+ expectedCachedResponse: []Response{ -+ mkAPIResponse(100, 120, 10), -+ mkAPIResponse(160, 200, 10), -+ }, -+ }, -+ { -+ name: ""Partial hits with tiny gap."", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 160, -+ }, -+ prevCachedResponse: []Extent{ -+ mkExtent(50, 120), -+ mkExtent(122, 130), -+ }, -+ expectedRequests: []Request{ -+ &PrometheusRequest{ -+ Start: 120, -+ End: 160, -+ }, -+ }, -+ 
expectedCachedResponse: []Response{ -+ mkAPIResponse(100, 120, 10), -+ }, -+ }, -+ { -+ name: ""Extent is outside the range and the request has a single step (same start and end)."", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 100, -+ }, -+ prevCachedResponse: []Extent{ -+ mkExtent(50, 90), -+ }, -+ expectedRequests: []Request{ -+ &PrometheusRequest{ -+ Start: 100, -+ End: 100, -+ }, -+ }, -+ }, -+ { -+ name: ""Test when hit has a large step and only a single sample extent."", -+ // If there is a only a single sample in the split interval, start and end will be the same. -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 100, -+ }, -+ prevCachedResponse: []Extent{ -+ mkExtent(100, 100), -+ }, -+ expectedCachedResponse: []Response{ -+ mkAPIResponse(100, 105, 10), -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ s := resultsCache{ -+ extractor: PrometheusResponseExtractor{}, -+ minCacheExtent: 10, -+ } -+ reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse) -+ require.Nil(t, err) -+ require.Equal(t, tc.expectedRequests, reqs) -+ require.Equal(t, tc.expectedCachedResponse, resps) -+ }) -+ } -+} -+ -+func TestHandleHit(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ input Request -+ cachedEntry []Extent -+ expectedUpdatedCachedEntry []Extent -+ }{ -+ { -+ name: ""Should drop tiny extent that overlaps with non-tiny request only"", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 120, -+ Step: 5, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(0, 50, 5), -+ mkExtentWithStep(60, 65, 5), -+ mkExtentWithStep(100, 105, 5), -+ mkExtentWithStep(110, 150, 5), -+ mkExtentWithStep(160, 165, 5), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(0, 50, 5), -+ mkExtentWithStep(60, 65, 5), -+ mkExtentWithStep(100, 150, 5), -+ mkExtentWithStep(160, 165, 5), -+ }, -+ }, -+ { -+ name: ""Should replace tiny extents that are cover by bigger request"", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 200, -+ Step: 5, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(0, 50, 5), -+ mkExtentWithStep(60, 65, 5), -+ mkExtentWithStep(100, 105, 5), -+ mkExtentWithStep(110, 115, 5), -+ mkExtentWithStep(120, 125, 5), -+ mkExtentWithStep(220, 225, 5), -+ mkExtentWithStep(240, 250, 5), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(0, 50, 5), -+ mkExtentWithStep(60, 65, 5), -+ mkExtentWithStep(100, 200, 5), -+ mkExtentWithStep(220, 225, 5), -+ mkExtentWithStep(240, 250, 5), -+ }, -+ }, -+ { -+ name: ""Should not drop tiny extent that completely overlaps with tiny request"", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 105, -+ Step: 5, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(0, 50, 5), -+ mkExtentWithStep(60, 65, 5), -+ mkExtentWithStep(100, 105, 5), -+ mkExtentWithStep(160, 165, 5), -+ }, -+ expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache -+ }, -+ { -+ name: ""Should not drop tiny extent that partially center-overlaps with tiny request"", -+ input: &PrometheusRequest{ -+ Start: 106, -+ End: 108, -+ Step: 2, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(60, 64, 2), -+ mkExtentWithStep(104, 110, 2), -+ mkExtentWithStep(160, 166, 2), -+ }, -+ expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache -+ }, -+ { -+ name: ""Should not drop tiny extent that partially left-overlaps with tiny request"", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 106, -+ Step: 2, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(60, 64, 2), 
-+ mkExtentWithStep(104, 110, 2), -+ mkExtentWithStep(160, 166, 2), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(60, 64, 2), -+ mkExtentWithStep(100, 110, 2), -+ mkExtentWithStep(160, 166, 2), -+ }, -+ }, -+ { -+ name: ""Should not drop tiny extent that partially right-overlaps with tiny request"", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 106, -+ Step: 2, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(60, 64, 2), -+ mkExtentWithStep(98, 102, 2), -+ mkExtentWithStep(160, 166, 2), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(60, 64, 2), -+ mkExtentWithStep(98, 106, 2), -+ mkExtentWithStep(160, 166, 2), -+ }, -+ }, -+ { -+ name: ""Should merge fragmented extents if request fills the hole"", -+ input: &PrometheusRequest{ -+ Start: 40, -+ End: 80, -+ Step: 20, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(0, 20, 20), -+ mkExtentWithStep(80, 100, 20), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(0, 100, 20), -+ }, -+ }, -+ { -+ name: ""Should left-extend extent if request starts earlier than extent in cache"", -+ input: &PrometheusRequest{ -+ Start: 40, -+ End: 80, -+ Step: 20, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(60, 160, 20), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(40, 160, 20), -+ }, -+ }, -+ { -+ name: ""Should right-extend extent if request ends later than extent in cache"", -+ input: &PrometheusRequest{ -+ Start: 100, -+ End: 180, -+ Step: 20, -+ }, -+ cachedEntry: []Extent{ -+ mkExtentWithStep(60, 160, 20), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(60, 180, 20), -+ }, -+ }, -+ { -+ name: ""Should not throw error if complete-overlapped smaller Extent is erroneous"", -+ input: &PrometheusRequest{ -+ // This request is carefully crated such that cachedEntry is not used to fulfill -+ // the request. -+ Start: 160, -+ End: 180, -+ Step: 20, -+ }, -+ cachedEntry: []Extent{ -+ { -+ Start: 60, -+ End: 80, -+ -+ // if the optimization of ""sorting by End when Start of 2 Extents are equal"" is not there, this nil -+ // response would cause error during Extents merge phase. With the optimization -+ // this bad Extent should be dropped. The good Extent below can be used instead. 
-+ Response: nil, -+ }, -+ mkExtentWithStep(60, 160, 20), -+ }, -+ expectedUpdatedCachedEntry: []Extent{ -+ mkExtentWithStep(60, 180, 20), -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ sut := resultsCache{ -+ extractor: PrometheusResponseExtractor{}, -+ minCacheExtent: 10, -+ limits: mockLimits{}, -+ merger: PrometheusCodec, -+ next: HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ return mkAPIResponse(req.GetStart(), req.GetEnd(), req.GetStep()), nil -+ }), -+ } -+ -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0) -+ require.NoError(t, err) -+ -+ expectedResponse := mkAPIResponse(tc.input.GetStart(), tc.input.GetEnd(), tc.input.GetStep()) -+ require.Equal(t, expectedResponse, response, ""response does not match the expectation"") -+ require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, ""updated cache entry does not match the expectation"") -+ }) -+ } -+} -+ -+func TestResultsCache(t *testing.T) { -+ calls := 0 -+ cfg := ResultsCacheConfig{ -+ CacheConfig: cache.Config{ -+ Cache: cache.NewMockCache(), -+ }, -+ } -+ rcm, _, err := NewResultsCacheMiddleware( -+ log.NewNopLogger(), -+ cfg, -+ constSplitter(day), -+ mockLimits{}, -+ PrometheusCodec, -+ PrometheusResponseExtractor{}, -+ nil, -+ nil, -+ nil, -+ ) -+ require.NoError(t, err) -+ -+ rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ calls++ -+ return parsedResponse, nil -+ })) -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ resp, err := rc.Do(ctx, parsedRequest) -+ require.NoError(t, err) -+ require.Equal(t, 1, calls) -+ require.Equal(t, parsedResponse, resp) -+ -+ // Doing same request again shouldn't change anything. -+ resp, err = rc.Do(ctx, parsedRequest) -+ require.NoError(t, err) -+ require.Equal(t, 1, calls) -+ require.Equal(t, parsedResponse, resp) -+ -+ // Doing request with new end time should do one more query. -+ req := parsedRequest.WithStartEnd(parsedRequest.GetStart(), parsedRequest.GetEnd()+100) -+ _, err = rc.Do(ctx, req) -+ require.NoError(t, err) -+ require.Equal(t, 2, calls) -+} -+ -+func TestResultsCacheRecent(t *testing.T) { -+ var cfg ResultsCacheConfig -+ flagext.DefaultValues(&cfg) -+ cfg.CacheConfig.Cache = cache.NewMockCache() -+ rcm, _, err := NewResultsCacheMiddleware( -+ log.NewNopLogger(), -+ cfg, -+ constSplitter(day), -+ mockLimits{maxCacheFreshness: 10 * time.Minute}, -+ PrometheusCodec, -+ PrometheusResponseExtractor{}, -+ nil, -+ nil, -+ nil, -+ ) -+ require.NoError(t, err) -+ -+ req := parsedRequest.WithStartEnd(int64(model.Now())-(60*1e3), int64(model.Now())) -+ -+ calls := 0 -+ rc := rcm.Wrap(HandlerFunc(func(_ context.Context, r Request) (Response, error) { -+ calls++ -+ assert.Equal(t, r, req) -+ return parsedResponse, nil -+ })) -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ -+ // Request should result in a query. -+ resp, err := rc.Do(ctx, req) -+ require.NoError(t, err) -+ require.Equal(t, 1, calls) -+ require.Equal(t, parsedResponse, resp) -+ -+ // Doing same request again should result in another query. 
-+ resp, err = rc.Do(ctx, req) -+ require.NoError(t, err) -+ require.Equal(t, 2, calls) -+ require.Equal(t, parsedResponse, resp) -+} -+ -+func TestResultsCacheMaxFreshness(t *testing.T) { -+ modelNow := model.Now() -+ for i, tc := range []struct { -+ fakeLimits Limits -+ Handler HandlerFunc -+ expectedResponse *PrometheusResponse -+ }{ -+ { -+ fakeLimits: mockLimits{maxCacheFreshness: 5 * time.Second}, -+ Handler: nil, -+ expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10), -+ }, -+ { -+ // should not lookup cache because per-tenant override will be applied -+ fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute}, -+ Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { -+ return parsedResponse, nil -+ }), -+ expectedResponse: parsedResponse, -+ }, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ var cfg ResultsCacheConfig -+ flagext.DefaultValues(&cfg) -+ cfg.CacheConfig.Cache = cache.NewMockCache() -+ -+ fakeLimits := tc.fakeLimits -+ rcm, _, err := NewResultsCacheMiddleware( -+ log.NewNopLogger(), -+ cfg, -+ constSplitter(day), -+ fakeLimits, -+ PrometheusCodec, -+ PrometheusResponseExtractor{}, -+ nil, -+ nil, -+ nil, -+ ) -+ require.NoError(t, err) -+ -+ // create cache with handler -+ rc := rcm.Wrap(tc.Handler) -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ -+ // create request with start end within the key extents -+ req := parsedRequest.WithStartEnd(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3)) -+ -+ // fill cache -+ key := constSplitter(day).GenerateCacheKey(""1"", req) -+ rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))}) -+ -+ resp, err := rc.Do(ctx, req) -+ require.NoError(t, err) -+ require.Equal(t, tc.expectedResponse, resp) -+ }) -+ } -+} -+ -+func Test_resultsCache_MissingData(t *testing.T) { -+ cfg := ResultsCacheConfig{ -+ CacheConfig: cache.Config{ -+ Cache: cache.NewMockCache(), -+ }, -+ } -+ rm, _, err := NewResultsCacheMiddleware( -+ log.NewNopLogger(), -+ cfg, -+ constSplitter(day), -+ mockLimits{}, -+ PrometheusCodec, -+ PrometheusResponseExtractor{}, -+ nil, -+ nil, -+ nil, -+ ) -+ require.NoError(t, err) -+ rc := rm.Wrap(nil).(*resultsCache) -+ ctx := context.Background() -+ -+ // fill up the cache -+ rc.put(ctx, ""empty"", []Extent{{ -+ Start: 100, -+ End: 200, -+ Response: nil, -+ }}) -+ rc.put(ctx, ""notempty"", []Extent{mkExtent(100, 120)}) -+ rc.put(ctx, ""mixed"", []Extent{mkExtent(100, 120), { -+ Start: 120, -+ End: 200, -+ Response: nil, -+ }}) -+ -+ extents, hit := rc.get(ctx, ""empty"") -+ require.Empty(t, extents) -+ require.False(t, hit) -+ -+ extents, hit = rc.get(ctx, ""notempty"") -+ require.Equal(t, len(extents), 1) -+ require.True(t, hit) -+ -+ extents, hit = rc.get(ctx, ""mixed"") -+ require.Equal(t, len(extents), 0) -+ require.False(t, hit) -+} -+ -+func TestConstSplitter_generateCacheKey(t *testing.T) { -+ t.Parallel() -+ -+ tests := []struct { -+ name string -+ r Request -+ interval time.Duration -+ want string -+ }{ -+ {""0"", &PrometheusRequest{Start: 0, Step: 10, Query: ""foo{}""}, 30 * time.Minute, ""fake:foo{}:10:0""}, -+ {""<30m"", &PrometheusRequest{Start: toMs(10 * time.Minute), Step: 10, Query: ""foo{}""}, 30 * time.Minute, ""fake:foo{}:10:0""}, -+ {""30m"", &PrometheusRequest{Start: toMs(30 * time.Minute), Step: 10, Query: ""foo{}""}, 30 * time.Minute, ""fake:foo{}:10:1""}, -+ {""91m"", &PrometheusRequest{Start: toMs(91 * time.Minute), Step: 10, Query: ""foo{}""}, 30 * time.Minute, 
""fake:foo{}:10:3""}, -+ {""0"", &PrometheusRequest{Start: 0, Step: 10, Query: ""foo{}""}, 24 * time.Hour, ""fake:foo{}:10:0""}, -+ {""<1d"", &PrometheusRequest{Start: toMs(22 * time.Hour), Step: 10, Query: ""foo{}""}, 24 * time.Hour, ""fake:foo{}:10:0""}, -+ {""4d"", &PrometheusRequest{Start: toMs(4 * 24 * time.Hour), Step: 10, Query: ""foo{}""}, 24 * time.Hour, ""fake:foo{}:10:4""}, -+ {""3d5h"", &PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: ""foo{}""}, 24 * time.Hour, ""fake:foo{}:10:3""}, -+ } -+ for _, tt := range tests { -+ t.Run(fmt.Sprintf(""%s - %s"", tt.name, tt.interval), func(t *testing.T) { -+ if got := constSplitter(tt.interval).GenerateCacheKey(""fake"", tt.r); got != tt.want { -+ t.Errorf(""generateKey() = %v, want %v"", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestResultsCacheShouldCacheFunc(t *testing.T) { -+ testcases := []struct { -+ name string -+ shouldCache ShouldCacheFn -+ requests []Request -+ expectedCall int -+ }{ -+ { -+ name: ""normal"", -+ shouldCache: nil, -+ requests: []Request{parsedRequest, parsedRequest}, -+ expectedCall: 1, -+ }, -+ { -+ name: ""always no cache"", -+ shouldCache: func(r Request) bool { -+ return false -+ }, -+ requests: []Request{parsedRequest, parsedRequest}, -+ expectedCall: 2, -+ }, -+ { -+ name: ""check cache based on request"", -+ shouldCache: func(r Request) bool { -+ return !r.GetCachingOptions().Disabled -+ }, -+ requests: []Request{noCacheRequest, noCacheRequest}, -+ expectedCall: 2, -+ }, -+ } -+ -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ calls := 0 -+ var cfg ResultsCacheConfig -+ flagext.DefaultValues(&cfg) -+ cfg.CacheConfig.Cache = cache.NewMockCache() -+ rcm, _, err := NewResultsCacheMiddleware( -+ log.NewNopLogger(), -+ cfg, -+ constSplitter(day), -+ mockLimits{maxCacheFreshness: 10 * time.Minute}, -+ PrometheusCodec, -+ PrometheusResponseExtractor{}, -+ nil, -+ tc.shouldCache, -+ nil, -+ ) -+ require.NoError(t, err) -+ rc := rcm.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ calls++ -+ return parsedResponse, nil -+ })) -+ -+ for _, req := range tc.requests { -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ _, err := rc.Do(ctx, req) -+ require.NoError(t, err) -+ } -+ -+ require.Equal(t, tc.expectedCall, calls) -+ }) -+ } -+} -+ -+func toMs(t time.Duration) int64 { -+ return int64(t / time.Millisecond) -+} -+ -+type mockCacheGenNumberLoader struct { -+} -+ -+func newMockCacheGenNumberLoader() CacheGenNumberLoader { -+ return mockCacheGenNumberLoader{} -+} -+ -+func (mockCacheGenNumberLoader) GetResultsCacheGenNumber(tenantIDs []string) string { -+ return """" -+} -diff --git a/pkg/querier/queryrange/queryrangebase/retry_test.go b/pkg/querier/queryrange/queryrangebase/retry_test.go -new file mode 100644 -index 0000000000000..6346f5b3d3b08 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/retry_test.go -@@ -0,0 +1,93 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""errors"" -+ ""fmt"" -+ ""net/http"" -+ ""testing"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/httpgrpc"" -+ ""go.uber.org/atomic"" -+) -+ -+func TestRetry(t *testing.T) { -+ var try atomic.Int32 -+ -+ for _, tc := range []struct { -+ name string -+ handler Handler -+ resp Response -+ err error -+ }{ -+ { -+ name: ""retry failures"", -+ handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ if try.Inc() == 5 { -+ return &PrometheusResponse{Status: ""Hello 
World""}, nil -+ } -+ return nil, fmt.Errorf(""fail"") -+ }), -+ resp: &PrometheusResponse{Status: ""Hello World""}, -+ }, -+ { -+ name: ""don't retry 400s"", -+ handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ return nil, httpgrpc.Errorf(http.StatusBadRequest, ""Bad Request"") -+ }), -+ err: httpgrpc.Errorf(http.StatusBadRequest, ""Bad Request""), -+ }, -+ { -+ name: ""retry 500s"", -+ handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ return nil, httpgrpc.Errorf(http.StatusInternalServerError, ""Internal Server Error"") -+ }), -+ err: httpgrpc.Errorf(http.StatusInternalServerError, ""Internal Server Error""), -+ }, -+ { -+ name: ""last error"", -+ handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ if try.Inc() == 5 { -+ return nil, httpgrpc.Errorf(http.StatusBadRequest, ""Bad Request"") -+ } -+ return nil, httpgrpc.Errorf(http.StatusInternalServerError, ""Internal Server Error"") -+ }), -+ err: httpgrpc.Errorf(http.StatusBadRequest, ""Bad Request""), -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ try.Store(0) -+ h := NewRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap(tc.handler) -+ resp, err := h.Do(context.Background(), nil) -+ require.Equal(t, tc.err, err) -+ require.Equal(t, tc.resp, resp) -+ }) -+ } -+} -+ -+func Test_RetryMiddlewareCancel(t *testing.T) { -+ var try atomic.Int32 -+ ctx, cancel := context.WithCancel(context.Background()) -+ cancel() -+ _, err := NewRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap( -+ HandlerFunc(func(c context.Context, r Request) (Response, error) { -+ try.Inc() -+ return nil, ctx.Err() -+ }), -+ ).Do(ctx, nil) -+ require.Equal(t, int32(0), try.Load()) -+ require.Equal(t, ctx.Err(), err) -+ -+ ctx, cancel = context.WithCancel(context.Background()) -+ _, err = NewRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap( -+ HandlerFunc(func(c context.Context, r Request) (Response, error) { -+ try.Inc() -+ cancel() -+ return nil, errors.New(""failed"") -+ }), -+ ).Do(ctx, nil) -+ require.Equal(t, int32(1), try.Load()) -+ require.Equal(t, ctx.Err(), err) -+} -diff --git a/pkg/querier/queryrange/queryrangebase/roundtrip.go b/pkg/querier/queryrange/queryrangebase/roundtrip.go -index 59f99ed3ab907..27bd1dc4f0582 100644 ---- a/pkg/querier/queryrange/queryrangebase/roundtrip.go -+++ b/pkg/querier/queryrange/queryrangebase/roundtrip.go -@@ -41,6 +41,8 @@ import ( - ""github.com/grafana/loki/pkg/tenant"" - ) - -+const day = 24 * time.Hour -+ - var ( - // PassthroughMiddleware is a noop middleware - PassthroughMiddleware = MiddlewareFunc(func(next Handler) Handler { -diff --git a/pkg/querier/queryrange/queryrangebase/roundtrip_test.go b/pkg/querier/queryrange/queryrangebase/roundtrip_test.go -new file mode 100644 -index 0000000000000..790eed2569c3e ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/roundtrip_test.go -@@ -0,0 +1,124 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""io/ioutil"" -+ ""net/http"" -+ ""net/http/httptest"" -+ ""net/url"" -+ ""strconv"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/prometheus/prometheus/promql"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/middleware"" -+ ""github.com/weaveworks/common/user"" -+ -+ ""github.com/grafana/loki/pkg/storage/chunk"" -+) -+ -+func TestRoundTrip(t *testing.T) { -+ s := httptest.NewServer( -+ middleware.AuthenticateUser.Wrap( -+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -+ var err error -+ if 
r.RequestURI == query { -+ _, err = w.Write([]byte(responseBody)) -+ } else { -+ _, err = w.Write([]byte(""bar"")) -+ } -+ if err != nil { -+ t.Fatal(err) -+ } -+ }), -+ ), -+ ) -+ defer s.Close() -+ -+ u, err := url.Parse(s.URL) -+ require.NoError(t, err) -+ -+ downstream := singleHostRoundTripper{ -+ host: u.Host, -+ next: http.DefaultTransport, -+ } -+ -+ tw, _, err := NewTripperware(Config{}, -+ log.NewNopLogger(), -+ mockLimits{}, -+ PrometheusCodec, -+ nil, -+ chunk.SchemaConfig{}, -+ promql.EngineOpts{ -+ Logger: log.NewNopLogger(), -+ Reg: nil, -+ MaxSamples: 1000, -+ Timeout: time.Minute, -+ }, -+ 0, -+ nil, -+ nil, -+ ) -+ -+ if err != nil { -+ t.Fatal(err) -+ } -+ -+ for i, tc := range []struct { -+ path, expectedBody string -+ }{ -+ {""/foo"", ""bar""}, -+ {query, responseBody}, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ req, err := http.NewRequest(""GET"", tc.path, http.NoBody) -+ require.NoError(t, err) -+ -+ // query-frontend doesn't actually authenticate requests, we rely on -+ // the queriers to do this. Hence we ensure the request doesn't have a -+ // org ID in the ctx, but does have the header. -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ req = req.WithContext(ctx) -+ err = user.InjectOrgIDIntoHTTPRequest(ctx, req) -+ require.NoError(t, err) -+ -+ resp, err := tw(downstream).RoundTrip(req) -+ require.NoError(t, err) -+ require.Equal(t, 200, resp.StatusCode) -+ -+ bs, err := ioutil.ReadAll(resp.Body) -+ require.NoError(t, err) -+ require.Equal(t, tc.expectedBody, string(bs)) -+ }) -+ } -+} -+ -+type singleHostRoundTripper struct { -+ host string -+ next http.RoundTripper -+} -+ -+func (s singleHostRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { -+ r.URL.Scheme = ""http"" -+ r.URL.Host = s.host -+ return s.next.RoundTrip(r) -+} -+ -+func Test_ShardingConfigError(t *testing.T) { -+ _, _, err := NewTripperware( -+ Config{ShardedQueries: true}, -+ log.NewNopLogger(), -+ nil, -+ nil, -+ nil, -+ chunk.SchemaConfig{}, -+ promql.EngineOpts{}, -+ 0, -+ nil, -+ nil, -+ ) -+ -+ require.EqualError(t, err, errInvalidMinShardingLookback.Error()) -+} -diff --git a/pkg/querier/queryrange/queryrangebase/series_test.go b/pkg/querier/queryrange/queryrangebase/series_test.go -new file mode 100644 -index 0000000000000..728e0f01b7d80 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/series_test.go -@@ -0,0 +1,75 @@ -+package queryrangebase -+ -+import ( -+ ""testing"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/prometheus/prometheus/promql/parser"" -+ ""github.com/stretchr/testify/require"" -+) -+ -+func Test_ResponseToSamples(t *testing.T) { -+ input := &PrometheusResponse{ -+ Data: PrometheusData{ -+ ResultType: string(parser.ValueTypeMatrix), -+ Result: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 1, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 2, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 8, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 9, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ }, -+ }, -+ } -+ -+ streams, err := ResponseToSamples(input) -+ require.Nil(t, err) -+ set := NewSeriesSet(streams) -+ -+ setCt := 0 -+ -+ for set.Next() { -+ iter := set.At().Iterator() -+ require.Nil(t, set.Err()) -+ -+ sampleCt := 0 -+ for iter.Next() { -+ ts, v 
:= iter.At() -+ require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].TimestampMs, ts) -+ require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].Value, v) -+ sampleCt++ -+ } -+ require.Equal(t, len(input.Data.Result[setCt].Samples), sampleCt) -+ setCt++ -+ } -+ -+ require.Equal(t, len(input.Data.Result), setCt) -+ -+} -diff --git a/pkg/querier/queryrange/queryrangebase/split_by_interval_test.go b/pkg/querier/queryrange/queryrangebase/split_by_interval_test.go -new file mode 100644 -index 0000000000000..03c8050a9df60 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/split_by_interval_test.go -@@ -0,0 +1,379 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""io/ioutil"" -+ ""net/http"" -+ ""net/http/httptest"" -+ ""net/url"" -+ ""strconv"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/prometheus/prometheus/promql/parser"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/httpgrpc"" -+ ""github.com/weaveworks/common/middleware"" -+ ""github.com/weaveworks/common/user"" -+ ""go.uber.org/atomic"" -+) -+ -+const seconds = 1e3 // 1e3 milliseconds per second. -+ -+func TestNextIntervalBoundary(t *testing.T) { -+ for i, tc := range []struct { -+ in, step, out int64 -+ interval time.Duration -+ }{ -+ // Smallest possible period is 1 millisecond -+ {0, 1, toMs(day) - 1, day}, -+ {0, 1, toMs(time.Hour) - 1, time.Hour}, -+ // A more standard example -+ {0, 15 * seconds, toMs(day) - 15*seconds, day}, -+ {0, 15 * seconds, toMs(time.Hour) - 15*seconds, time.Hour}, -+ // Move start time forward 1 second; end time moves the same -+ {1 * seconds, 15 * seconds, toMs(day) - (15-1)*seconds, day}, -+ {1 * seconds, 15 * seconds, toMs(time.Hour) - (15-1)*seconds, time.Hour}, -+ // Move start time forward 14 seconds; end time moves the same -+ {14 * seconds, 15 * seconds, toMs(day) - (15-14)*seconds, day}, -+ {14 * seconds, 15 * seconds, toMs(time.Hour) - (15-14)*seconds, time.Hour}, -+ // Now some examples where the period does not divide evenly into a day: -+ // 1 day modulus 35 seconds = 20 seconds -+ {0, 35 * seconds, toMs(day) - 20*seconds, day}, -+ // 1 hour modulus 35 sec = 30 (3600 mod 35 = 30) -+ {0, 35 * seconds, toMs(time.Hour) - 30*seconds, time.Hour}, -+ // Move start time forward 1 second; end time moves the same -+ {1 * seconds, 35 * seconds, toMs(day) - (20-1)*seconds, day}, -+ {1 * seconds, 35 * seconds, toMs(time.Hour) - (30-1)*seconds, time.Hour}, -+ // If the end time lands exactly on midnight we stop one period before that -+ {20 * seconds, 35 * seconds, toMs(day) - 35*seconds, day}, -+ {30 * seconds, 35 * seconds, toMs(time.Hour) - 35*seconds, time.Hour}, -+ // This example starts 35 seconds after the 5th one ends -+ {toMs(day) + 15*seconds, 35 * seconds, 2*toMs(day) - 5*seconds, day}, -+ {toMs(time.Hour) + 15*seconds, 35 * seconds, 2*toMs(time.Hour) - 15*seconds, time.Hour}, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ require.Equal(t, tc.out, nextIntervalBoundary(tc.in, tc.step, tc.interval)) -+ }) -+ } -+} -+ -+func TestSplitQuery(t *testing.T) { -+ for i, tc := range []struct { -+ input Request -+ expected []Request -+ interval time.Duration -+ }{ -+ { -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 60 * 60 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: 60 * 60 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ }, -+ interval: day, -+ }, -+ { -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 60 * 60 * seconds, -+ Step: 15 * 
seconds, -+ Query: ""foo"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: 60 * 60 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ }, -+ interval: 3 * time.Hour, -+ }, -+ { -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 24 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: 24 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ }, -+ interval: day, -+ }, -+ { -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 3 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: 3 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ }, -+ interval: 3 * time.Hour, -+ }, -+ { -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 2 * 24 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo @ start()"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: (24 * 3600 * seconds) - (15 * seconds), -+ Step: 15 * seconds, -+ Query: ""foo @ 0.000"", -+ }, -+ &PrometheusRequest{ -+ Start: 24 * 3600 * seconds, -+ End: 2 * 24 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo @ 0.000"", -+ }, -+ }, -+ interval: day, -+ }, -+ { -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 2 * 3 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 0, -+ End: (3 * 3600 * seconds) - (15 * seconds), -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ &PrometheusRequest{ -+ Start: 3 * 3600 * seconds, -+ End: 2 * 3 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ }, -+ interval: 3 * time.Hour, -+ }, -+ { -+ input: &PrometheusRequest{ -+ Start: 3 * 3600 * seconds, -+ End: 3 * 24 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 3 * 3600 * seconds, -+ End: (24 * 3600 * seconds) - (15 * seconds), -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ &PrometheusRequest{ -+ Start: 24 * 3600 * seconds, -+ End: (2 * 24 * 3600 * seconds) - (15 * seconds), -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ &PrometheusRequest{ -+ Start: 2 * 24 * 3600 * seconds, -+ End: 3 * 24 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ }, -+ interval: day, -+ }, -+ { -+ input: &PrometheusRequest{ -+ Start: 2 * 3600 * seconds, -+ End: 3 * 3 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ expected: []Request{ -+ &PrometheusRequest{ -+ Start: 2 * 3600 * seconds, -+ End: (3 * 3600 * seconds) - (15 * seconds), -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ &PrometheusRequest{ -+ Start: 3 * 3600 * seconds, -+ End: (2 * 3 * 3600 * seconds) - (15 * seconds), -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ &PrometheusRequest{ -+ Start: 2 * 3 * 3600 * seconds, -+ End: 3 * 3 * 3600 * seconds, -+ Step: 15 * seconds, -+ Query: ""foo"", -+ }, -+ }, -+ interval: 3 * time.Hour, -+ }, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ days, err := splitQuery(tc.input, tc.interval) -+ require.NoError(t, err) -+ require.Equal(t, tc.expected, days) -+ }) -+ } -+} -+ -+func TestSplitByDay(t *testing.T) { -+ mergedResponse, err := PrometheusCodec.MergeResponse(parsedResponse, parsedResponse) -+ require.NoError(t, err) -+ -+ mergedHTTPResponse, err := PrometheusCodec.EncodeResponse(context.Background(), mergedResponse) -+ require.NoError(t, err) -+ -+ mergedHTTPResponseBody, err := 
ioutil.ReadAll(mergedHTTPResponse.Body) -+ require.NoError(t, err) -+ -+ for i, tc := range []struct { -+ path, expectedBody string -+ expectedQueryCount int32 -+ }{ -+ {query, string(mergedHTTPResponseBody), 2}, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ var actualCount atomic.Int32 -+ s := httptest.NewServer( -+ middleware.AuthenticateUser.Wrap( -+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -+ actualCount.Inc() -+ _, _ = w.Write([]byte(responseBody)) -+ }), -+ ), -+ ) -+ defer s.Close() -+ -+ u, err := url.Parse(s.URL) -+ require.NoError(t, err) -+ -+ interval := func(_ Request) time.Duration { return 24 * time.Hour } -+ roundtripper := NewRoundTripper(singleHostRoundTripper{ -+ host: u.Host, -+ next: http.DefaultTransport, -+ }, PrometheusCodec, nil, NewLimitsMiddleware(mockLimits{}), SplitByIntervalMiddleware(interval, mockLimits{}, PrometheusCodec, nil)) -+ -+ req, err := http.NewRequest(""GET"", tc.path, http.NoBody) -+ require.NoError(t, err) -+ -+ ctx := user.InjectOrgID(context.Background(), ""1"") -+ req = req.WithContext(ctx) -+ -+ resp, err := roundtripper.RoundTrip(req) -+ require.NoError(t, err) -+ require.Equal(t, 200, resp.StatusCode) -+ -+ bs, err := ioutil.ReadAll(resp.Body) -+ require.NoError(t, err) -+ require.Equal(t, tc.expectedBody, string(bs)) -+ require.Equal(t, tc.expectedQueryCount, actualCount.Load()) -+ }) -+ } -+} -+ -+func Test_evaluateAtModifier(t *testing.T) { -+ const ( -+ start, end = int64(1546300800), int64(1646300800) -+ ) -+ for _, tt := range []struct { -+ in, expected string -+ expectedErrorCode int -+ }{ -+ { -+ in: ""topk(5, rate(http_requests_total[1h] @ start()))"", -+ expected: ""topk(5, rate(http_requests_total[1h] @ 1546300.800))"", -+ }, -+ { -+ in: ""topk(5, rate(http_requests_total[1h] @ 0))"", -+ expected: ""topk(5, rate(http_requests_total[1h] @ 0.000))"", -+ }, -+ { -+ in: ""http_requests_total[1h] @ 10.001"", -+ expected: ""http_requests_total[1h] @ 10.001"", -+ }, -+ { -+ in: `min_over_time( -+ sum by(cluster) ( -+ rate(http_requests_total[5m] @ end()) -+ )[10m:] -+ ) -+ or -+ max_over_time( -+ stddev_over_time( -+ deriv( -+ rate(http_requests_total[10m] @ start()) -+ [5m:1m]) -+ [2m:]) -+ [10m:])`, -+ expected: `min_over_time( -+ sum by(cluster) ( -+ rate(http_requests_total[5m] @ 1646300.800) -+ )[10m:] -+ ) -+ or -+ max_over_time( -+ stddev_over_time( -+ deriv( -+ rate(http_requests_total[10m] @ 1546300.800) -+ [5m:1m]) -+ [2m:]) -+ [10m:])`, -+ }, -+ { -+ // parse error: missing unit character in duration -+ in: ""http_requests_total[5] @ 10.001"", -+ expectedErrorCode: http.StatusBadRequest, -+ }, -+ { -+ // parse error: @ modifier must be preceded by an instant vector selector or range vector selector or a subquery -+ in: ""sum(http_requests_total[5m]) @ 10.001"", -+ expectedErrorCode: http.StatusBadRequest, -+ }, -+ } { -+ tt := tt -+ t.Run(tt.in, func(t *testing.T) { -+ t.Parallel() -+ out, err := evaluateAtModifierFunction(tt.in, start, end) -+ if tt.expectedErrorCode != 0 { -+ require.Error(t, err) -+ httpResp, ok := httpgrpc.HTTPResponseFromError(err) -+ require.True(t, ok, ""returned error is not an httpgrpc response"") -+ require.Equal(t, tt.expectedErrorCode, int(httpResp.Code)) -+ } else { -+ require.NoError(t, err) -+ expectedExpr, err := parser.ParseExpr(tt.expected) -+ require.NoError(t, err) -+ require.Equal(t, expectedExpr.String(), out) -+ } -+ }) -+ } -+} -diff --git a/pkg/querier/queryrange/queryrangebase/step_align_test.go 
b/pkg/querier/queryrange/queryrangebase/step_align_test.go -new file mode 100644 -index 0000000000000..d68a2080d4b25 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/step_align_test.go -@@ -0,0 +1,54 @@ -+package queryrangebase -+ -+import ( -+ ""context"" -+ ""strconv"" -+ ""testing"" -+ -+ ""github.com/stretchr/testify/require"" -+) -+ -+func TestStepAlign(t *testing.T) { -+ for i, tc := range []struct { -+ input, expected *PrometheusRequest -+ }{ -+ { -+ input: &PrometheusRequest{ -+ Start: 0, -+ End: 100, -+ Step: 10, -+ }, -+ expected: &PrometheusRequest{ -+ Start: 0, -+ End: 100, -+ Step: 10, -+ }, -+ }, -+ -+ { -+ input: &PrometheusRequest{ -+ Start: 2, -+ End: 102, -+ Step: 10, -+ }, -+ expected: &PrometheusRequest{ -+ Start: 0, -+ End: 100, -+ Step: 10, -+ }, -+ }, -+ } { -+ t.Run(strconv.Itoa(i), func(t *testing.T) { -+ var result *PrometheusRequest -+ s := stepAlign{ -+ next: HandlerFunc(func(_ context.Context, req Request) (Response, error) { -+ result = req.(*PrometheusRequest) -+ return nil, nil -+ }), -+ } -+ _, err := s.Do(context.Background(), tc.input) -+ require.NoError(t, err) -+ require.Equal(t, tc.expected, result) -+ }) -+ } -+} -diff --git a/pkg/querier/queryrange/queryrangebase/test_utils_test.go b/pkg/querier/queryrange/queryrangebase/test_utils_test.go -new file mode 100644 -index 0000000000000..9272fec77d7b0 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/test_utils_test.go -@@ -0,0 +1,135 @@ -+package queryrangebase -+ -+import ( -+ ""math"" -+ ""sort"" -+ ""testing"" -+ -+ ""github.com/prometheus/prometheus/model/labels"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/querier/astmapper"" -+) -+ -+func TestGenLabelsCorrectness(t *testing.T) { -+ ls := genLabels([]string{""a"", ""b""}, 2) -+ for _, set := range ls { -+ sort.Sort(set) -+ } -+ expected := []labels.Labels{ -+ { -+ labels.Label{ -+ Name: ""a"", -+ Value: ""0"", -+ }, -+ labels.Label{ -+ Name: ""b"", -+ Value: ""0"", -+ }, -+ }, -+ { -+ labels.Label{ -+ Name: ""a"", -+ Value: ""0"", -+ }, -+ labels.Label{ -+ Name: ""b"", -+ Value: ""1"", -+ }, -+ }, -+ { -+ labels.Label{ -+ Name: ""a"", -+ Value: ""1"", -+ }, -+ labels.Label{ -+ Name: ""b"", -+ Value: ""0"", -+ }, -+ }, -+ { -+ labels.Label{ -+ Name: ""a"", -+ Value: ""1"", -+ }, -+ labels.Label{ -+ Name: ""b"", -+ Value: ""1"", -+ }, -+ }, -+ } -+ require.Equal(t, expected, ls) -+} -+ -+func TestGenLabelsSize(t *testing.T) { -+ for _, tc := range []struct { -+ set []string -+ buckets int -+ }{ -+ { -+ set: []string{""a"", ""b""}, -+ buckets: 5, -+ }, -+ { -+ set: []string{""a"", ""b"", ""c""}, -+ buckets: 10, -+ }, -+ } { -+ sets := genLabels(tc.set, tc.buckets) -+ require.Equal( -+ t, -+ math.Pow(float64(tc.buckets), float64(len(tc.set))), -+ float64(len(sets)), -+ ) -+ } -+} -+ -+func TestNewMockShardedqueryable(t *testing.T) { -+ for _, tc := range []struct { -+ shards, nSamples, labelBuckets int -+ labelSet []string -+ }{ -+ { -+ nSamples: 100, -+ shards: 1, -+ labelBuckets: 3, -+ labelSet: []string{""a"", ""b"", ""c""}, -+ }, -+ { -+ nSamples: 0, -+ shards: 2, -+ labelBuckets: 3, -+ labelSet: []string{""a"", ""b"", ""c""}, -+ }, -+ } { -+ q := NewMockShardedQueryable(tc.nSamples, tc.labelSet, tc.labelBuckets, 0) -+ expectedSeries := int(math.Pow(float64(tc.labelBuckets), float64(len(tc.labelSet)))) -+ -+ seriesCt := 0 -+ for i := 0; i < tc.shards; i++ { -+ -+ set := q.Select(false, nil, &labels.Matcher{ -+ Type: labels.MatchEqual, -+ Name: astmapper.ShardLabel, -+ Value: 
astmapper.ShardAnnotation{ -+ Shard: i, -+ Of: tc.shards, -+ }.String(), -+ }) -+ -+ require.Nil(t, set.Err()) -+ -+ for set.Next() { -+ seriesCt++ -+ iter := set.At().Iterator() -+ samples := 0 -+ for iter.Next() { -+ samples++ -+ } -+ require.Equal(t, tc.nSamples, samples) -+ } -+ -+ } -+ require.Equal(t, expectedSeries, seriesCt) -+ } -+} -diff --git a/pkg/querier/queryrange/queryrangebase/value_test.go b/pkg/querier/queryrange/queryrangebase/value_test.go -new file mode 100644 -index 0000000000000..ffaed937ba558 ---- /dev/null -+++ b/pkg/querier/queryrange/queryrangebase/value_test.go -@@ -0,0 +1,168 @@ -+package queryrangebase -+ -+import ( -+ ""fmt"" -+ ""testing"" -+ -+ ""github.com/pkg/errors"" -+ ""github.com/prometheus/prometheus/model/labels"" -+ ""github.com/prometheus/prometheus/promql"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+) -+ -+func TestFromValue(t *testing.T) { -+ var testExpr = []struct { -+ input *promql.Result -+ err bool -+ expected []SampleStream -+ }{ -+ // string (errors) -+ { -+ input: &promql.Result{Value: promql.String{T: 1, V: ""hi""}}, -+ err: true, -+ }, -+ { -+ input: &promql.Result{Err: errors.New(""foo"")}, -+ err: true, -+ }, -+ // Scalar -+ { -+ input: &promql.Result{Value: promql.Scalar{T: 1, V: 1}}, -+ err: false, -+ expected: []SampleStream{ -+ { -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 1, -+ TimestampMs: 1, -+ }, -+ }, -+ }, -+ }, -+ }, -+ // Vector -+ { -+ input: &promql.Result{ -+ Value: promql.Vector{ -+ promql.Sample{ -+ Point: promql.Point{T: 1, V: 1}, -+ Metric: labels.Labels{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ }, -+ promql.Sample{ -+ Point: promql.Point{T: 2, V: 2}, -+ Metric: labels.Labels{ -+ {Name: ""a"", Value: ""a2""}, -+ {Name: ""b"", Value: ""b2""}, -+ }, -+ }, -+ }, -+ }, -+ err: false, -+ expected: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 1, -+ TimestampMs: 1, -+ }, -+ }, -+ }, -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a2""}, -+ {Name: ""b"", Value: ""b2""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 2, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ }, -+ }, -+ // Matrix -+ { -+ input: &promql.Result{ -+ Value: promql.Matrix{ -+ { -+ Metric: labels.Labels{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Points: []promql.Point{ -+ {T: 1, V: 1}, -+ {T: 2, V: 2}, -+ }, -+ }, -+ { -+ Metric: labels.Labels{ -+ {Name: ""a"", Value: ""a2""}, -+ {Name: ""b"", Value: ""b2""}, -+ }, -+ Points: []promql.Point{ -+ {T: 1, V: 8}, -+ {T: 2, V: 9}, -+ }, -+ }, -+ }, -+ }, -+ err: false, -+ expected: []SampleStream{ -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a1""}, -+ {Name: ""b"", Value: ""b1""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 1, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 2, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ { -+ Labels: []cortexpb.LabelAdapter{ -+ {Name: ""a"", Value: ""a2""}, -+ {Name: ""b"", Value: ""b2""}, -+ }, -+ Samples: []cortexpb.Sample{ -+ { -+ Value: 8, -+ TimestampMs: 1, -+ }, -+ { -+ Value: 9, -+ TimestampMs: 2, -+ }, -+ }, -+ }, -+ }, -+ }, -+ } -+ -+ for i, c := range testExpr { -+ t.Run(fmt.Sprintf(""[%d]"", i), func(t *testing.T) { -+ result, err := FromResult(c.input) -+ if c.err { -+ require.NotNil(t, err) -+ } else { -+ require.Nil(t, err) -+ require.Equal(t, c.expected, result) -+ } -+ }) -+ } 
-+}",unknown,Addition of cortex' queryrange tests (#5183) -ca19683511aebc16a61ef49e251d4df01e3e68fe,2020-03-18 00:06:49,Peter Štibraný,"loki: update Cortex to master (#1799) - -* Update Cortex to master - -Signed-off-by: Peter Štibraný - -* Integrate latest Cortex master changes into loki - -- Frontend worker now needs to be started explicitly -- Lifecycler no longer exits on error. Distributor and Ingester do that now - -Signed-off-by: Peter Štibraný - -* Lint - -Signed-off-by: Peter Štibraný - -* go mod tidy - -Signed-off-by: Peter Štibraný - -* Update Cortex to master. After 0.7 release, it is now showing as v0.7.1-... - -Signed-off-by: Peter Štibraný ",False,"diff --git a/go.mod b/go.mod -index f66d9672319a4..8717ee2235c95 100644 ---- a/go.mod -+++ b/go.mod -@@ -10,7 +10,7 @@ require ( - github.com/containerd/containerd v1.3.2 // indirect - github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect - github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e -- github.com/cortexproject/cortex v0.7.0-rc.0 -+ github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c - github.com/davecgh/go-spew v1.1.1 - github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 -diff --git a/go.sum b/go.sum -index 69ec6f29739e3..857119ed10428 100644 ---- a/go.sum -+++ b/go.sum -@@ -158,8 +158,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 - github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= --github.com/cortexproject/cortex v0.7.0-rc.0 h1:oa/RzR9E09/5AkmTPGk97ObbhZmB5TycFzL59inProQ= --github.com/cortexproject/cortex v0.7.0-rc.0/go.mod h1:aiDfjSBZGE+q213mWACqjawNVN9CqFG4F+20TkeChA0= -+github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c h1:WQUYYiNH49fS9bZXLbikGO0eexb8dx8W5rIe/iCiKNs= -+github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c/go.mod h1:dMuT8RuWexf371937IhTj7/Ha3P/+Aog3pddNtV6Jo0= - github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= - github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= - github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= -@@ -770,7 +770,6 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM - github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= - github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= - github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= --github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= - github.com/weaveworks/common v0.0.0-20200310113808-2708ba4e60a4 h1:H1CjeKf1q/bL7OBvb6KZclHsvnGRGr0Tsuy6y5rtFEc= - github.com/weaveworks/common v0.0.0-20200310113808-2708ba4e60a4/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= - github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= -diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index 47495336c60c5..fc5773bd81018 100644 ---- a/pkg/distributor/distributor.go -+++ 
b/pkg/distributor/distributor.go -@@ -4,6 +4,7 @@ import ( - ""context"" - ""flag"" - ""net/http"" -+ ""os"" - ""sync/atomic"" - ""time"" - -@@ -113,6 +114,13 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr - return nil, err - } - -+ distributorsRing.AddListener(services.NewListener(nil, nil, nil, nil, func(_ services.State, failure error) { -+ // lifecycler used to do os.Exit(1) on its own failure, but now it just goes into Failed state. -+ // for now we just simulate old behaviour here. When Distributor itself becomes a service, it will enter Failed state as well. -+ level.Error(cortex_util.Logger).Log(""msg"", ""lifecycler failed"", ""err"", err) -+ os.Exit(1) -+ })) -+ - err = services.StartAndAwaitRunning(context.Background(), distributorsRing) - if err != nil { - return nil, err -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index db96d44b0cc95..a752cc8cbde6c 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -6,6 +6,7 @@ import ( - ""flag"" - ""fmt"" - ""net/http"" -+ ""os"" - ""sync"" - ""time"" - -@@ -148,6 +149,13 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid - return nil, err - } - -+ i.lifecycler.AddListener(services.NewListener(nil, nil, nil, nil, func(_ services.State, failure error) { -+ // lifecycler used to do os.Exit(1) on its own failure, but now it just goes into Failed state. -+ // for now we just simulate old behaviour here. When Ingester itself becomes a service, it will enter Failed state as well. -+ level.Error(util.Logger).Log(""msg"", ""lifecycler failed"", ""err"", err) -+ os.Exit(1) -+ })) -+ - err = services.StartAndAwaitRunning(context.Background(), i.lifecycler) - if err != nil { - return nil, err -diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go -index c678aeacbd345..6b14a3e50f20f 100644 ---- a/pkg/loki/loki.go -+++ b/pkg/loki/loki.go -@@ -85,7 +85,6 @@ type Loki struct { - querier *querier.Querier - store storage.Store - tableManager *chunk.TableManager -- worker frontend.Worker - frontend *frontend.Frontend - stopper queryrange.Stopper - runtimeConfig *runtimeconfig.Manager -diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index 7c14738cf7d1f..86f00005e53d4 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -150,15 +150,23 @@ func (t *Loki) stopDistributor() (err error) { - return nil - } - --func (t *Loki) initQuerier() (err error) { -+func (t *Loki) initQuerier() error { - level.Debug(util.Logger).Log(""msg"", ""initializing querier worker"", ""config"", fmt.Sprintf(""%+v"", t.cfg.Worker)) -- t.worker, err = frontend.NewWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.server.HTTPServer.Handler), util.Logger) -+ worker, err := frontend.NewWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.server.HTTPServer.Handler), util.Logger) - if err != nil { -- return -+ return err - } -+ // worker is nil, if no address is defined. 
-+ if worker != nil { -+ err = services.StartAndAwaitRunning(context.Background(), worker) -+ if err != nil { -+ return err -+ } -+ } -+ - t.querier, err = querier.New(t.cfg.Querier, t.cfg.IngesterClient, t.ring, t.store, t.overrides) - if err != nil { -- return -+ return err - } - - httpMiddleware := middleware.Merge( -@@ -182,7 +190,7 @@ func (t *Loki) initQuerier() (err error) { - t.server.HTTP.Handle(""/api/prom/label/{name}/values"", httpMiddleware.Wrap(http.HandlerFunc(t.querier.LabelHandler))) - t.server.HTTP.Handle(""/api/prom/tail"", httpMiddleware.Wrap(http.HandlerFunc(t.querier.TailHandler))) - t.server.HTTP.Handle(""/api/prom/series"", httpMiddleware.Wrap(http.HandlerFunc(t.querier.SeriesHandler))) -- return -+ return nil - } - - func (t *Loki) initIngester() (err error) { -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go -index 88d71bd84e328..fa2b6679fdc96 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go -@@ -4,6 +4,8 @@ import ( - ""context"" - ""flag"" - ""fmt"" -+ ""net"" -+ ""net/http"" - ""net/url"" - ""strings"" - ""time"" -@@ -865,5 +867,23 @@ func awsSessionFromURL(awsURL *url.URL) (client.ConfigProvider, error) { - return nil, err - } - config = config.WithMaxRetries(0) // We do our own retries, so we can monitor them -+ config = config.WithHTTPClient(&http.Client{Transport: defaultTransport}) - return session.NewSession(config) - } -+ -+// Copy-pasted http.DefaultTransport -+var defaultTransport http.RoundTripper = &http.Transport{ -+ Proxy: http.ProxyFromEnvironment, -+ DialContext: (&net.Dialer{ -+ Timeout: 30 * time.Second, -+ KeepAlive: 30 * time.Second, -+ }).DialContext, -+ ForceAttemptHTTP2: true, -+ MaxIdleConns: 100, -+ // We will connect many times in parallel to the same DynamoDB server, -+ // see https://github.com/golang/go/issues/13801 -+ MaxIdleConnsPerHost: 100, -+ IdleConnTimeout: 90 * time.Second, -+ TLSHandshakeTimeout: 10 * time.Second, -+ ExpectContinueTimeout: 1 * time.Second, -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go -index 339945cf35d47..7083d77321fd5 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go -@@ -6,9 +6,11 @@ import ( - ""fmt"" - ""hash/fnv"" - ""io"" -+ ""net/http"" - ""strings"" - - ""github.com/aws/aws-sdk-go/aws"" -+ ""github.com/aws/aws-sdk-go/aws/awserr"" - ""github.com/aws/aws-sdk-go/aws/session"" - ""github.com/aws/aws-sdk-go/service/s3"" - ""github.com/aws/aws-sdk-go/service/s3/s3iface"" -@@ -71,6 +73,7 @@ func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { - s3Config = s3Config.WithS3ForcePathStyle(cfg.S3ForcePathStyle) // support for Path Style S3 url if has the flag - - s3Config = s3Config.WithMaxRetries(0) // We do our own retries, so we can monitor them -+ s3Config = s3Config.WithHTTPClient(&http.Client{Transport: defaultTransport}) - sess, err := session.NewSession(s3Config) - if err != nil { - return nil, err -@@ -90,9 +93,23 @@ func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { - // Stop fulfills the chunk.ObjectClient interface - func (a *S3ObjectClient) Stop() {} - --func (a *S3ObjectClient) 
DeleteObject(ctx context.Context, chunkID string) error { -- // ToDo: implement this to support deleting chunks from S3 -- return chunk.ErrMethodNotImplemented -+// DeleteObject deletes the specified objectKey from the appropriate S3 bucket -+func (a *S3ObjectClient) DeleteObject(ctx context.Context, objectKey string) error { -+ _, err := a.S3.DeleteObject(&s3.DeleteObjectInput{ -+ Bucket: aws.String(a.bucketFromKey(objectKey)), -+ Key: aws.String(objectKey), -+ }) -+ -+ if err != nil { -+ if aerr, ok := err.(awserr.Error); ok { -+ if aerr.Code() == s3.ErrCodeNoSuchKey { -+ return chunk.ErrStorageObjectNotFound -+ } -+ } -+ return err -+ } -+ -+ return nil - } - - // bucketFromKey maps a key to a bucket name -@@ -108,7 +125,8 @@ func (a *S3ObjectClient) bucketFromKey(key string) string { - return a.bucketNames[hash%uint32(len(a.bucketNames))] - } - --// Get object from the store -+// GetObject returns a reader for the specified object key from the configured S3 bucket. If the -+// key does not exist a generic chunk.ErrStorageObjectNotFound error is returned. - func (a *S3ObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error) { - var resp *s3.GetObjectOutput - -@@ -123,7 +141,13 @@ func (a *S3ObjectClient) GetObject(ctx context.Context, objectKey string) (io.Re - }) - return err - }) -+ - if err != nil { -+ if aerr, ok := err.(awserr.Error); ok { -+ if aerr.Code() == s3.ErrCodeNoSuchKey { -+ return nil, chunk.ErrStorageObjectNotFound -+ } -+ } - return nil, err - } - -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go -index 0b8a6b30407cc..9eb3cd87ec876 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go -@@ -5,6 +5,7 @@ import ( - ""flag"" - ""sync"" - ""time"" -+ ""unsafe"" - - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/client_golang/prometheus/promauto"" -@@ -32,6 +33,13 @@ var ( - Help: ""The total number of evicted entries"", - }, []string{""cache""}) - -+ cacheEntriesCurrent = promauto.NewGaugeVec(prometheus.GaugeOpts{ -+ Namespace: ""querier"", -+ Subsystem: ""cache"", -+ Name: ""entries"", -+ Help: ""The total number of entries"", -+ }, []string{""cache""}) -+ - cacheTotalGets = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: ""querier"", - Subsystem: ""cache"", -@@ -52,6 +60,13 @@ var ( - Name: ""stale_gets_total"", - Help: ""The total number of Get calls that had an entry which expired"", - }, []string{""cache""}) -+ -+ cacheMemoryBytes = promauto.NewGaugeVec(prometheus.GaugeOpts{ -+ Namespace: ""querier"", -+ Subsystem: ""cache"", -+ Name: ""memory_bytes"", -+ Help: ""The current cache size in bytes"", -+ }, []string{""cache""}) - ) - - // FifoCacheConfig holds config for the FifoCache. -@@ -81,9 +96,11 @@ type FifoCache struct { - entriesAdded prometheus.Counter - entriesAddedNew prometheus.Counter - entriesEvicted prometheus.Counter -+ entriesCurrent prometheus.Gauge - totalGets prometheus.Counter - totalMisses prometheus.Counter - staleGets prometheus.Counter -+ memoryBytes prometheus.Gauge - } - - type cacheEntry struct { -@@ -96,7 +113,7 @@ type cacheEntry struct { - // NewFifoCache returns a new initialised FifoCache of size. - // TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing. 
- func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache { -- return &FifoCache{ -+ cache := &FifoCache{ - size: cfg.Size, - validity: cfg.Validity, - entries: make([]cacheEntry, 0, cfg.Size), -@@ -106,10 +123,15 @@ func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache { - entriesAdded: cacheEntriesAdded.WithLabelValues(name), - entriesAddedNew: cacheEntriesAddedNew.WithLabelValues(name), - entriesEvicted: cacheEntriesEvicted.WithLabelValues(name), -+ entriesCurrent: cacheEntriesCurrent.WithLabelValues(name), - totalGets: cacheTotalGets.WithLabelValues(name), - totalMisses: cacheTotalMisses.WithLabelValues(name), - staleGets: cacheStaleGets.WithLabelValues(name), -+ memoryBytes: cacheMemoryBytes.WithLabelValues(name), - } -+ // set initial memory allocation -+ cache.memoryBytes.Set(float64(int(unsafe.Sizeof(cacheEntry{})) * cache.size)) -+ return cache - } - - // Fetch implements Cache. -@@ -162,6 +184,7 @@ func (c *FifoCache) put(ctx context.Context, key string, value interface{}) { - index, ok := c.index[key] - if ok { - entry := c.entries[index] -+ deltaSize := sizeOf(value) - sizeOf(entry.value) - - entry.updated = time.Now() - entry.value = value -@@ -170,6 +193,11 @@ func (c *FifoCache) put(ctx context.Context, key string, value interface{}) { - c.entries[entry.prev].next = entry.next - c.entries[entry.next].prev = entry.prev - -+ // Corner case: updating last element -+ if c.last == index { -+ c.last = entry.prev -+ } -+ - // Insert it at the beginning - entry.next = c.first - entry.prev = c.last -@@ -178,6 +206,7 @@ func (c *FifoCache) put(ctx context.Context, key string, value interface{}) { - c.first = index - - c.entries[index] = entry -+ c.memoryBytes.Add(float64(deltaSize)) - return - } - c.entriesAddedNew.Inc() -@@ -187,6 +216,7 @@ func (c *FifoCache) put(ctx context.Context, key string, value interface{}) { - c.entriesEvicted.Inc() - index = c.last - entry := c.entries[index] -+ deltaSize := sizeOf(key) + sizeOf(value) - sizeOf(entry.key) - sizeOf(entry.value) - - c.last = entry.prev - c.first = index -@@ -197,6 +227,7 @@ func (c *FifoCache) put(ctx context.Context, key string, value interface{}) { - entry.value = value - entry.key = key - c.entries[index] = entry -+ c.memoryBytes.Add(float64(deltaSize)) - return - } - -@@ -213,6 +244,9 @@ func (c *FifoCache) put(ctx context.Context, key string, value interface{}) { - c.entries[c.last].next = index - c.first = index - c.index[key] = index -+ -+ c.memoryBytes.Add(float64(sizeOf(key) + sizeOf(value))) -+ c.entriesCurrent.Inc() - } - - // Get returns the stored value against the key and when the key was last updated. 
-@@ -240,3 +274,39 @@ func (c *FifoCache) Get(ctx context.Context, key string) (interface{}, bool) { - c.totalMisses.Inc() - return nil, false - } -+ -+func sizeOf(i interface{}) int { -+ switch v := i.(type) { -+ case string: -+ return len(v) -+ case []int8: -+ return len(v) -+ case []uint8: -+ return len(v) -+ case []int32: -+ return len(v) * 4 -+ case []uint32: -+ return len(v) * 4 -+ case []float32: -+ return len(v) * 4 -+ case []int64: -+ return len(v) * 8 -+ case []uint64: -+ return len(v) * 8 -+ case []float64: -+ return len(v) * 8 -+ // next 2 cases are machine dependent -+ case []int: -+ if l := len(v); l > 0 { -+ return int(unsafe.Sizeof(v[0])) * l -+ } -+ return 0 -+ case []uint: -+ if l := len(v); l > 0 { -+ return int(unsafe.Sizeof(v[0])) * l -+ } -+ return 0 -+ default: -+ return int(unsafe.Sizeof(i)) -+ } -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go -index 4785e1951a54f..f8703ef69e3c6 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go -@@ -64,7 +64,8 @@ func (s *GCSObjectClient) Stop() { - s.client.Close() - } - --// Get object from the store -+// GetObject returns a reader for the specified object key from the configured GCS bucket. If the -+// key does not exist a generic chunk.ErrStorageObjectNotFound error is returned. - func (s *GCSObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error) { - if s.cfg.RequestTimeout > 0 { - // The context will be cancelled with the timeout or when the parent context is cancelled, whichever occurs first. -@@ -73,10 +74,19 @@ func (s *GCSObjectClient) GetObject(ctx context.Context, objectKey string) (io.R - defer cancel() - } - -- return s.bucket.Object(objectKey).NewReader(ctx) -+ reader, err := s.bucket.Object(objectKey).NewReader(ctx) -+ -+ if err != nil { -+ if err == storage.ErrObjectNotExist { -+ return nil, chunk.ErrStorageObjectNotFound -+ } -+ return nil, err -+ } -+ -+ return reader, nil - } - --// Put object into the store -+// PutObject puts the specified bytes into the configured GCS bucket at the provided key - func (s *GCSObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { - writer := s.bucket.Object(objectKey).NewWriter(ctx) - // Default GCSChunkSize is 8M and for each call, 8M is allocated xD -@@ -128,7 +138,17 @@ func (s *GCSObjectClient) List(ctx context.Context, prefix string) ([]chunk.Stor - return storageObjects, nil - } - --func (s *GCSObjectClient) DeleteObject(ctx context.Context, chunkID string) error { -- // ToDo: implement this to support deleting chunks from GCS -- return chunk.ErrMethodNotImplemented -+// DeleteObject deletes the specified object key from the configured GCS bucket. If the -+// key does not exist a generic chunk.ErrStorageObjectNotFound error is returned. 
-+func (s *GCSObjectClient) DeleteObject(ctx context.Context, objectKey string) error { -+ err := s.bucket.Object(objectKey).Delete(ctx) -+ -+ if err != nil { -+ if err == storage.ErrObjectNotExist { -+ return chunk.ErrStorageObjectNotFound -+ } -+ return err -+ } -+ -+ return nil - } -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.pb.go -new file mode 100644 -index 0000000000000..ab1ef599ac673 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.pb.go -@@ -0,0 +1,1354 @@ -+// Code generated by protoc-gen-gogo. DO NOT EDIT. -+// source: delete_plan.proto -+ -+package purger -+ -+import ( -+ fmt ""fmt"" -+ _ ""github.com/cortexproject/cortex/pkg/ingester/client"" -+ github_com_cortexproject_cortex_pkg_ingester_client ""github.com/cortexproject/cortex/pkg/ingester/client"" -+ _ ""github.com/gogo/protobuf/gogoproto"" -+ proto ""github.com/gogo/protobuf/proto"" -+ io ""io"" -+ math ""math"" -+ math_bits ""math/bits"" -+ reflect ""reflect"" -+ strings ""strings"" -+) -+ -+// Reference imports to suppress errors if they are not otherwise used. -+var _ = proto.Marshal -+var _ = fmt.Errorf -+var _ = math.Inf -+ -+// This is a compile-time assertion to ensure that this generated file -+// is compatible with the proto package it is being compiled against. -+// A compilation error at this line likely means your copy of the -+// proto package needs to be updated. -+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -+ -+// DeletePlan holds all the chunks that are supposed to be deleted within an interval(usually a day) -+// This Proto file is used just for storing Delete Plans in proto format. 
-+type DeletePlan struct { -+ PlanInterval *Interval `protobuf:""bytes,1,opt,name=plan_interval,json=planInterval,proto3"" json:""plan_interval,omitempty""` -+ ChunksGroup []ChunksGroup `protobuf:""bytes,2,rep,name=chunks_group,json=chunksGroup,proto3"" json:""chunks_group""` -+} -+ -+func (m *DeletePlan) Reset() { *m = DeletePlan{} } -+func (*DeletePlan) ProtoMessage() {} -+func (*DeletePlan) Descriptor() ([]byte, []int) { -+ return fileDescriptor_c38868cf63b27372, []int{0} -+} -+func (m *DeletePlan) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *DeletePlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_DeletePlan.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *DeletePlan) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_DeletePlan.Merge(m, src) -+} -+func (m *DeletePlan) XXX_Size() int { -+ return m.Size() -+} -+func (m *DeletePlan) XXX_DiscardUnknown() { -+ xxx_messageInfo_DeletePlan.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_DeletePlan proto.InternalMessageInfo -+ -+func (m *DeletePlan) GetPlanInterval() *Interval { -+ if m != nil { -+ return m.PlanInterval -+ } -+ return nil -+} -+ -+func (m *DeletePlan) GetChunksGroup() []ChunksGroup { -+ if m != nil { -+ return m.ChunksGroup -+ } -+ return nil -+} -+ -+// ChunksGroup holds ChunkDetails and Labels for a group of chunks which have same series ID -+type ChunksGroup struct { -+ Labels []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:""bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter"" json:""labels""` -+ Chunks []ChunkDetails `protobuf:""bytes,2,rep,name=chunks,proto3"" json:""chunks""` -+} -+ -+func (m *ChunksGroup) Reset() { *m = ChunksGroup{} } -+func (*ChunksGroup) ProtoMessage() {} -+func (*ChunksGroup) Descriptor() ([]byte, []int) { -+ return fileDescriptor_c38868cf63b27372, []int{1} -+} -+func (m *ChunksGroup) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *ChunksGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_ChunksGroup.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *ChunksGroup) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_ChunksGroup.Merge(m, src) -+} -+func (m *ChunksGroup) XXX_Size() int { -+ return m.Size() -+} -+func (m *ChunksGroup) XXX_DiscardUnknown() { -+ xxx_messageInfo_ChunksGroup.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_ChunksGroup proto.InternalMessageInfo -+ -+func (m *ChunksGroup) GetChunks() []ChunkDetails { -+ if m != nil { -+ return m.Chunks -+ } -+ return nil -+} -+ -+type ChunkDetails struct { -+ ID string `protobuf:""bytes,1,opt,name=ID,json=iD,proto3"" json:""ID,omitempty""` -+ PartiallyDeletedInterval *Interval `protobuf:""bytes,2,opt,name=partially_deleted_interval,json=partiallyDeletedInterval,proto3"" json:""partially_deleted_interval,omitempty""` -+} -+ -+func (m *ChunkDetails) Reset() { *m = ChunkDetails{} } -+func (*ChunkDetails) ProtoMessage() {} -+func (*ChunkDetails) Descriptor() ([]byte, []int) { -+ return fileDescriptor_c38868cf63b27372, []int{2} -+} -+func (m *ChunkDetails) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m 
*ChunkDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_ChunkDetails.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *ChunkDetails) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_ChunkDetails.Merge(m, src) -+} -+func (m *ChunkDetails) XXX_Size() int { -+ return m.Size() -+} -+func (m *ChunkDetails) XXX_DiscardUnknown() { -+ xxx_messageInfo_ChunkDetails.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_ChunkDetails proto.InternalMessageInfo -+ -+func (m *ChunkDetails) GetID() string { -+ if m != nil { -+ return m.ID -+ } -+ return """" -+} -+ -+func (m *ChunkDetails) GetPartiallyDeletedInterval() *Interval { -+ if m != nil { -+ return m.PartiallyDeletedInterval -+ } -+ return nil -+} -+ -+type Interval struct { -+ StartTimestampMs int64 `protobuf:""varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3"" json:""start_timestamp_ms,omitempty""` -+ EndTimestampMs int64 `protobuf:""varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3"" json:""end_timestamp_ms,omitempty""` -+} -+ -+func (m *Interval) Reset() { *m = Interval{} } -+func (*Interval) ProtoMessage() {} -+func (*Interval) Descriptor() ([]byte, []int) { -+ return fileDescriptor_c38868cf63b27372, []int{3} -+} -+func (m *Interval) XXX_Unmarshal(b []byte) error { -+ return m.Unmarshal(b) -+} -+func (m *Interval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ if deterministic { -+ return xxx_messageInfo_Interval.Marshal(b, m, deterministic) -+ } else { -+ b = b[:cap(b)] -+ n, err := m.MarshalToSizedBuffer(b) -+ if err != nil { -+ return nil, err -+ } -+ return b[:n], nil -+ } -+} -+func (m *Interval) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_Interval.Merge(m, src) -+} -+func (m *Interval) XXX_Size() int { -+ return m.Size() -+} -+func (m *Interval) XXX_DiscardUnknown() { -+ xxx_messageInfo_Interval.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_Interval proto.InternalMessageInfo -+ -+func (m *Interval) GetStartTimestampMs() int64 { -+ if m != nil { -+ return m.StartTimestampMs -+ } -+ return 0 -+} -+ -+func (m *Interval) GetEndTimestampMs() int64 { -+ if m != nil { -+ return m.EndTimestampMs -+ } -+ return 0 -+} -+ -+func init() { -+ proto.RegisterType((*DeletePlan)(nil), ""purgeplan.DeletePlan"") -+ proto.RegisterType((*ChunksGroup)(nil), ""purgeplan.ChunksGroup"") -+ proto.RegisterType((*ChunkDetails)(nil), ""purgeplan.ChunkDetails"") -+ proto.RegisterType((*Interval)(nil), ""purgeplan.Interval"") -+} -+ -+func init() { proto.RegisterFile(""delete_plan.proto"", fileDescriptor_c38868cf63b27372) } -+ -+var fileDescriptor_c38868cf63b27372 = []byte{ -+ // 454 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x31, 0x6f, 0xd4, 0x30, -+ 0x18, 0x8d, 0x53, 0x74, 0xa2, 0xbe, 0xa3, 0x6a, 0x8d, 0x04, 0xa7, 0x1b, 0xdc, 0xea, 0xa6, 0x1b, -+ 0x20, 0x91, 0x8a, 0x90, 0x18, 0x90, 0x80, 0xe3, 0x24, 0xa8, 0x04, 0x52, 0x89, 0x98, 0x58, 0x22, -+ 0x27, 0xf9, 0x48, 0x4d, 0x7d, 0xb1, 0xb1, 0x1d, 0x04, 0x1b, 0x1b, 0x2b, 0x3f, 0x83, 0xbf, 0xc0, -+ 0x3f, 0xe8, 0x78, 0x63, 0xc5, 0x50, 0x71, 0xb9, 0x85, 0xb1, 0x3f, 0x01, 0xc5, 0xc9, 0x5d, 0x03, -+ 0x12, 0x0b, 0x5b, 0xde, 0xf7, 0xde, 0xf7, 0xfc, 0xfc, 0x62, 0xbc, 0x97, 0x81, 0x00, 0x0b, 0xb1, -+ 0x12, 0xac, 0x08, 0x94, 0x96, 0x56, 0x92, 0x6d, 0x55, 0xea, 0x1c, 0xea, 0xc1, 0xe8, 0x6e, 0xce, -+ 0xed, 0x49, 0x99, 
0x04, 0xa9, 0x9c, 0x87, 0xb9, 0xcc, 0x65, 0xe8, 0x14, 0x49, 0xf9, 0xd6, 0x21, -+ 0x07, 0xdc, 0x57, 0xb3, 0x39, 0x7a, 0xdc, 0x91, 0xa7, 0x52, 0x5b, 0xf8, 0xa8, 0xb4, 0x7c, 0x07, -+ 0xa9, 0x6d, 0x51, 0xa8, 0x4e, 0xf3, 0x90, 0x17, 0x39, 0x18, 0x0b, 0x3a, 0x4c, 0x05, 0x87, 0x62, -+ 0x4d, 0x35, 0x0e, 0xe3, 0x2f, 0x08, 0xe3, 0x99, 0x4b, 0x74, 0x2c, 0x58, 0x41, 0x1e, 0xe0, 0x1b, -+ 0x75, 0x8e, 0x98, 0x17, 0x16, 0xf4, 0x07, 0x26, 0x86, 0xe8, 0x00, 0x4d, 0xfa, 0x87, 0x37, 0x83, -+ 0x4d, 0xc4, 0xe0, 0xa8, 0xa5, 0xa2, 0x41, 0x0d, 0xd7, 0x88, 0x3c, 0xc2, 0x83, 0xf4, 0xa4, 0x2c, -+ 0x4e, 0x4d, 0x9c, 0x6b, 0x59, 0xaa, 0xa1, 0x7f, 0xb0, 0x35, 0xe9, 0x1f, 0xde, 0xea, 0x2c, 0x3e, -+ 0x75, 0xf4, 0xb3, 0x9a, 0x9d, 0x5e, 0x3b, 0xbb, 0xd8, 0xf7, 0xa2, 0x7e, 0x7a, 0x35, 0x1a, 0x7f, -+ 0x47, 0xb8, 0xdf, 0x91, 0x10, 0x83, 0x7b, 0x82, 0x25, 0x20, 0xcc, 0x10, 0x39, 0xab, 0xbd, 0xa0, -+ 0x0d, 0xfe, 0xa2, 0x9e, 0x1e, 0x33, 0xae, 0xa7, 0xcf, 0x6b, 0x97, 0x1f, 0x17, 0xfb, 0xff, 0x53, -+ 0x43, 0x63, 0xf3, 0x24, 0x63, 0xca, 0x82, 0x8e, 0xda, 0xa3, 0xc8, 0x7d, 0xdc, 0x6b, 0x32, 0xb5, -+ 0xf9, 0x6f, 0xff, 0x9d, 0x7f, 0x06, 0x96, 0x71, 0x61, 0xda, 0x0b, 0xb4, 0xe2, 0xf1, 0x7b, 0x3c, -+ 0xe8, 0xb2, 0x64, 0x07, 0xfb, 0x47, 0x33, 0xd7, 0xdd, 0x76, 0xe4, 0xf3, 0x19, 0x79, 0x85, 0x47, -+ 0x8a, 0x69, 0xcb, 0x99, 0x10, 0x9f, 0xe2, 0xe6, 0x01, 0x64, 0x57, 0x1d, 0xfb, 0xff, 0xee, 0x78, -+ 0xb8, 0x59, 0x6b, 0x7e, 0x52, 0xb6, 0x66, 0xc6, 0x09, 0xbe, 0xbe, 0xe9, 0xfe, 0x0e, 0x26, 0xc6, -+ 0x32, 0x6d, 0x63, 0xcb, 0xe7, 0x60, 0x2c, 0x9b, 0xab, 0x78, 0x6e, 0xdc, 0xf1, 0x5b, 0xd1, 0xae, -+ 0x63, 0x5e, 0xaf, 0x89, 0x97, 0x86, 0x4c, 0xf0, 0x2e, 0x14, 0xd9, 0x9f, 0x5a, 0xdf, 0x69, 0x77, -+ 0xa0, 0xc8, 0x3a, 0xca, 0xe9, 0xc3, 0xc5, 0x92, 0x7a, 0xe7, 0x4b, 0xea, 0x5d, 0x2e, 0x29, 0xfa, -+ 0x5c, 0x51, 0xf4, 0xad, 0xa2, 0xe8, 0xac, 0xa2, 0x68, 0x51, 0x51, 0xf4, 0xb3, 0xa2, 0xe8, 0x57, -+ 0x45, 0xbd, 0xcb, 0x8a, 0xa2, 0xaf, 0x2b, 0xea, 0x2d, 0x56, 0xd4, 0x3b, 0x5f, 0x51, 0xef, 0x4d, -+ 0xcf, 0xdd, 0x43, 0x27, 0x3d, 0xf7, 0xc2, 0xee, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x6d, 0xa1, -+ 0xa8, 0x2d, 0xf2, 0x02, 0x00, 0x00, -+} -+ -+func (this *DeletePlan) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*DeletePlan) -+ if !ok { -+ that2, ok := that.(DeletePlan) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if !this.PlanInterval.Equal(that1.PlanInterval) { -+ return false -+ } -+ if len(this.ChunksGroup) != len(that1.ChunksGroup) { -+ return false -+ } -+ for i := range this.ChunksGroup { -+ if !this.ChunksGroup[i].Equal(&that1.ChunksGroup[i]) { -+ return false -+ } -+ } -+ return true -+} -+func (this *ChunksGroup) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*ChunksGroup) -+ if !ok { -+ that2, ok := that.(ChunksGroup) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if len(this.Labels) != len(that1.Labels) { -+ return false -+ } -+ for i := range this.Labels { -+ if !this.Labels[i].Equal(that1.Labels[i]) { -+ return false -+ } -+ } -+ if len(this.Chunks) != len(that1.Chunks) { -+ return false -+ } -+ for i := range this.Chunks { -+ if !this.Chunks[i].Equal(&that1.Chunks[i]) { -+ return false -+ } -+ } -+ return true -+} -+func (this *ChunkDetails) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil 
-+ } -+ -+ that1, ok := that.(*ChunkDetails) -+ if !ok { -+ that2, ok := that.(ChunkDetails) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if this.ID != that1.ID { -+ return false -+ } -+ if !this.PartiallyDeletedInterval.Equal(that1.PartiallyDeletedInterval) { -+ return false -+ } -+ return true -+} -+func (this *Interval) Equal(that interface{}) bool { -+ if that == nil { -+ return this == nil -+ } -+ -+ that1, ok := that.(*Interval) -+ if !ok { -+ that2, ok := that.(Interval) -+ if ok { -+ that1 = &that2 -+ } else { -+ return false -+ } -+ } -+ if that1 == nil { -+ return this == nil -+ } else if this == nil { -+ return false -+ } -+ if this.StartTimestampMs != that1.StartTimestampMs { -+ return false -+ } -+ if this.EndTimestampMs != that1.EndTimestampMs { -+ return false -+ } -+ return true -+} -+func (this *DeletePlan) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&purger.DeletePlan{"") -+ if this.PlanInterval != nil { -+ s = append(s, ""PlanInterval: ""+fmt.Sprintf(""%#v"", this.PlanInterval)+"",\n"") -+ } -+ if this.ChunksGroup != nil { -+ vs := make([]*ChunksGroup, len(this.ChunksGroup)) -+ for i := range vs { -+ vs[i] = &this.ChunksGroup[i] -+ } -+ s = append(s, ""ChunksGroup: ""+fmt.Sprintf(""%#v"", vs)+"",\n"") -+ } -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *ChunksGroup) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&purger.ChunksGroup{"") -+ s = append(s, ""Labels: ""+fmt.Sprintf(""%#v"", this.Labels)+"",\n"") -+ if this.Chunks != nil { -+ vs := make([]*ChunkDetails, len(this.Chunks)) -+ for i := range vs { -+ vs[i] = &this.Chunks[i] -+ } -+ s = append(s, ""Chunks: ""+fmt.Sprintf(""%#v"", vs)+"",\n"") -+ } -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *ChunkDetails) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&purger.ChunkDetails{"") -+ s = append(s, ""ID: ""+fmt.Sprintf(""%#v"", this.ID)+"",\n"") -+ if this.PartiallyDeletedInterval != nil { -+ s = append(s, ""PartiallyDeletedInterval: ""+fmt.Sprintf(""%#v"", this.PartiallyDeletedInterval)+"",\n"") -+ } -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func (this *Interval) GoString() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := make([]string, 0, 6) -+ s = append(s, ""&purger.Interval{"") -+ s = append(s, ""StartTimestampMs: ""+fmt.Sprintf(""%#v"", this.StartTimestampMs)+"",\n"") -+ s = append(s, ""EndTimestampMs: ""+fmt.Sprintf(""%#v"", this.EndTimestampMs)+"",\n"") -+ s = append(s, ""}"") -+ return strings.Join(s, """") -+} -+func valueToGoStringDeletePlan(v interface{}, typ string) string { -+ rv := reflect.ValueOf(v) -+ if rv.IsNil() { -+ return ""nil"" -+ } -+ pv := reflect.Indirect(rv).Interface() -+ return fmt.Sprintf(""func(v %v) *%v { return &v } ( %#v )"", typ, typ, pv) -+} -+func (m *DeletePlan) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err -+ } -+ return dAtA[:n], nil -+} -+ -+func (m *DeletePlan) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *DeletePlan) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = 
i -+ var l int -+ _ = l -+ if len(m.ChunksGroup) > 0 { -+ for iNdEx := len(m.ChunksGroup) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size, err := m.ChunksGroup[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintDeletePlan(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x12 -+ } -+ } -+ if m.PlanInterval != nil { -+ { -+ size, err := m.PlanInterval.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintDeletePlan(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0xa -+ } -+ return len(dAtA) - i, nil -+} -+ -+func (m *ChunksGroup) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err -+ } -+ return dAtA[:n], nil -+} -+ -+func (m *ChunksGroup) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *ChunksGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i -+ var l int -+ _ = l -+ if len(m.Chunks) > 0 { -+ for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintDeletePlan(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x12 -+ } -+ } -+ if len(m.Labels) > 0 { -+ for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { -+ { -+ size := m.Labels[iNdEx].Size() -+ i -= size -+ if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { -+ return 0, err -+ } -+ i = encodeVarintDeletePlan(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0xa -+ } -+ } -+ return len(dAtA) - i, nil -+} -+ -+func (m *ChunkDetails) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err -+ } -+ return dAtA[:n], nil -+} -+ -+func (m *ChunkDetails) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *ChunkDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i -+ var l int -+ _ = l -+ if m.PartiallyDeletedInterval != nil { -+ { -+ size, err := m.PartiallyDeletedInterval.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintDeletePlan(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x12 -+ } -+ if len(m.ID) > 0 { -+ i -= len(m.ID) -+ copy(dAtA[i:], m.ID) -+ i = encodeVarintDeletePlan(dAtA, i, uint64(len(m.ID))) -+ i-- -+ dAtA[i] = 0xa -+ } -+ return len(dAtA) - i, nil -+} -+ -+func (m *Interval) Marshal() (dAtA []byte, err error) { -+ size := m.Size() -+ dAtA = make([]byte, size) -+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) -+ if err != nil { -+ return nil, err -+ } -+ return dAtA[:n], nil -+} -+ -+func (m *Interval) MarshalTo(dAtA []byte) (int, error) { -+ size := m.Size() -+ return m.MarshalToSizedBuffer(dAtA[:size]) -+} -+ -+func (m *Interval) MarshalToSizedBuffer(dAtA []byte) (int, error) { -+ i := len(dAtA) -+ _ = i -+ var l int -+ _ = l -+ if m.EndTimestampMs != 0 { -+ i = encodeVarintDeletePlan(dAtA, i, uint64(m.EndTimestampMs)) -+ i-- -+ dAtA[i] = 0x10 -+ } -+ if m.StartTimestampMs != 0 { -+ i = encodeVarintDeletePlan(dAtA, i, uint64(m.StartTimestampMs)) -+ i-- -+ dAtA[i] = 0x8 -+ } -+ return len(dAtA) - i, nil -+} -+ -+func encodeVarintDeletePlan(dAtA []byte, offset int, v uint64) int { -+ offset -= 
sovDeletePlan(v) -+ base := offset -+ for v >= 1<<7 { -+ dAtA[offset] = uint8(v&0x7f | 0x80) -+ v >>= 7 -+ offset++ -+ } -+ dAtA[offset] = uint8(v) -+ return base -+} -+func (m *DeletePlan) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.PlanInterval != nil { -+ l = m.PlanInterval.Size() -+ n += 1 + l + sovDeletePlan(uint64(l)) -+ } -+ if len(m.ChunksGroup) > 0 { -+ for _, e := range m.ChunksGroup { -+ l = e.Size() -+ n += 1 + l + sovDeletePlan(uint64(l)) -+ } -+ } -+ return n -+} -+ -+func (m *ChunksGroup) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if len(m.Labels) > 0 { -+ for _, e := range m.Labels { -+ l = e.Size() -+ n += 1 + l + sovDeletePlan(uint64(l)) -+ } -+ } -+ if len(m.Chunks) > 0 { -+ for _, e := range m.Chunks { -+ l = e.Size() -+ n += 1 + l + sovDeletePlan(uint64(l)) -+ } -+ } -+ return n -+} -+ -+func (m *ChunkDetails) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ l = len(m.ID) -+ if l > 0 { -+ n += 1 + l + sovDeletePlan(uint64(l)) -+ } -+ if m.PartiallyDeletedInterval != nil { -+ l = m.PartiallyDeletedInterval.Size() -+ n += 1 + l + sovDeletePlan(uint64(l)) -+ } -+ return n -+} -+ -+func (m *Interval) Size() (n int) { -+ if m == nil { -+ return 0 -+ } -+ var l int -+ _ = l -+ if m.StartTimestampMs != 0 { -+ n += 1 + sovDeletePlan(uint64(m.StartTimestampMs)) -+ } -+ if m.EndTimestampMs != 0 { -+ n += 1 + sovDeletePlan(uint64(m.EndTimestampMs)) -+ } -+ return n -+} -+ -+func sovDeletePlan(x uint64) (n int) { -+ return (math_bits.Len64(x|1) + 6) / 7 -+} -+func sozDeletePlan(x uint64) (n int) { -+ return sovDeletePlan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -+} -+func (this *DeletePlan) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ repeatedStringForChunksGroup := ""[]ChunksGroup{"" -+ for _, f := range this.ChunksGroup { -+ repeatedStringForChunksGroup += strings.Replace(strings.Replace(f.String(), ""ChunksGroup"", ""ChunksGroup"", 1), `&`, ``, 1) + "","" -+ } -+ repeatedStringForChunksGroup += ""}"" -+ s := strings.Join([]string{`&DeletePlan{`, -+ `PlanInterval:` + strings.Replace(this.PlanInterval.String(), ""Interval"", ""Interval"", 1) + `,`, -+ `ChunksGroup:` + repeatedStringForChunksGroup + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *ChunksGroup) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ repeatedStringForChunks := ""[]ChunkDetails{"" -+ for _, f := range this.Chunks { -+ repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), ""ChunkDetails"", ""ChunkDetails"", 1), `&`, ``, 1) + "","" -+ } -+ repeatedStringForChunks += ""}"" -+ s := strings.Join([]string{`&ChunksGroup{`, -+ `Labels:` + fmt.Sprintf(""%v"", this.Labels) + `,`, -+ `Chunks:` + repeatedStringForChunks + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *ChunkDetails) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&ChunkDetails{`, -+ `ID:` + fmt.Sprintf(""%v"", this.ID) + `,`, -+ `PartiallyDeletedInterval:` + strings.Replace(this.PartiallyDeletedInterval.String(), ""Interval"", ""Interval"", 1) + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func (this *Interval) String() string { -+ if this == nil { -+ return ""nil"" -+ } -+ s := strings.Join([]string{`&Interval{`, -+ `StartTimestampMs:` + fmt.Sprintf(""%v"", this.StartTimestampMs) + `,`, -+ `EndTimestampMs:` + fmt.Sprintf(""%v"", this.EndTimestampMs) + `,`, -+ `}`, -+ }, """") -+ return s -+} -+func valueToStringDeletePlan(v interface{}) string { -+ 
rv := reflect.ValueOf(v) -+ if rv.IsNil() { -+ return ""nil"" -+ } -+ pv := reflect.Indirect(rv).Interface() -+ return fmt.Sprintf(""*%v"", pv) -+} -+func (m *DeletePlan) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: DeletePlan: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: DeletePlan: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field PlanInterval"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ if m.PlanInterval == nil { -+ m.PlanInterval = &Interval{} -+ } -+ if err := m.PlanInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ case 2: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field ChunksGroup"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.ChunksGroup = append(m.ChunksGroup, ChunksGroup{}) -+ if err := m.ChunksGroup[len(m.ChunksGroup)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipDeletePlan(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *ChunksGroup) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: ChunksGroup: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: ChunksGroup: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } 
-+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Labels"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) -+ if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ case 2: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Chunks"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.Chunks = append(m.Chunks, ChunkDetails{}) -+ if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipDeletePlan(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *ChunkDetails) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: ChunkDetails: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: ChunkDetails: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field ID"", wireType) -+ } -+ var stringLen uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ stringLen |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ intStringLen := int(stringLen) -+ if intStringLen < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ postIndex := iNdEx + intStringLen -+ if postIndex < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ m.ID = string(dAtA[iNdEx:postIndex]) -+ iNdEx = postIndex -+ case 2: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType 
= %d for field PartiallyDeletedInterval"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ if m.PartiallyDeletedInterval == nil { -+ m.PartiallyDeletedInterval = &Interval{} -+ } -+ if err := m.PartiallyDeletedInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ default: -+ iNdEx = preIndex -+ skippy, err := skipDeletePlan(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func (m *Interval) Unmarshal(dAtA []byte) error { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ preIndex := iNdEx -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= uint64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ fieldNum := int32(wire >> 3) -+ wireType := int(wire & 0x7) -+ if wireType == 4 { -+ return fmt.Errorf(""proto: Interval: wiretype end group for non-group"") -+ } -+ if fieldNum <= 0 { -+ return fmt.Errorf(""proto: Interval: illegal tag %d (wire type %d)"", fieldNum, wire) -+ } -+ switch fieldNum { -+ case 1: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field StartTimestampMs"", wireType) -+ } -+ m.StartTimestampMs = 0 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ m.StartTimestampMs |= int64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ case 2: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field EndTimestampMs"", wireType) -+ } -+ m.EndTimestampMs = 0 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ m.EndTimestampMs |= int64(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ default: -+ iNdEx = preIndex -+ skippy, err := skipDeletePlan(dAtA[iNdEx:]) -+ if err != nil { -+ return err -+ } -+ if skippy < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) < 0 { -+ return ErrInvalidLengthDeletePlan -+ } -+ if (iNdEx + skippy) > l { -+ return io.ErrUnexpectedEOF -+ } -+ iNdEx += skippy -+ } -+ } -+ -+ if iNdEx > l { -+ return io.ErrUnexpectedEOF -+ } -+ return nil -+} -+func skipDeletePlan(dAtA []byte) (n int, err error) { -+ l := len(dAtA) -+ iNdEx := 0 -+ for iNdEx < l { -+ var wire uint64 -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return 0, ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return 0, io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ wire |= (uint64(b) & 0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ wireType := int(wire & 
0x7) -+ switch wireType { -+ case 0: -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return 0, ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return 0, io.ErrUnexpectedEOF -+ } -+ iNdEx++ -+ if dAtA[iNdEx-1] < 0x80 { -+ break -+ } -+ } -+ return iNdEx, nil -+ case 1: -+ iNdEx += 8 -+ return iNdEx, nil -+ case 2: -+ var length int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return 0, ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return 0, io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ length |= (int(b) & 0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if length < 0 { -+ return 0, ErrInvalidLengthDeletePlan -+ } -+ iNdEx += length -+ if iNdEx < 0 { -+ return 0, ErrInvalidLengthDeletePlan -+ } -+ return iNdEx, nil -+ case 3: -+ for { -+ var innerWire uint64 -+ var start int = iNdEx -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return 0, ErrIntOverflowDeletePlan -+ } -+ if iNdEx >= l { -+ return 0, io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ innerWire |= (uint64(b) & 0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ innerWireType := int(innerWire & 0x7) -+ if innerWireType == 4 { -+ break -+ } -+ next, err := skipDeletePlan(dAtA[start:]) -+ if err != nil { -+ return 0, err -+ } -+ iNdEx = start + next -+ if iNdEx < 0 { -+ return 0, ErrInvalidLengthDeletePlan -+ } -+ } -+ return iNdEx, nil -+ case 4: -+ return iNdEx, nil -+ case 5: -+ iNdEx += 4 -+ return iNdEx, nil -+ default: -+ return 0, fmt.Errorf(""proto: illegal wireType %d"", wireType) -+ } -+ } -+ panic(""unreachable"") -+} -+ -+var ( -+ ErrInvalidLengthDeletePlan = fmt.Errorf(""proto: negative length found during unmarshaling"") -+ ErrIntOverflowDeletePlan = fmt.Errorf(""proto: integer overflow"") -+) -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.proto b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.proto -new file mode 100644 -index 0000000000000..2eaf1182103b0 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.proto -@@ -0,0 +1,34 @@ -+syntax = ""proto3""; -+ -+package purgeplan; -+ -+option go_package = ""purger""; -+ -+import ""github.com/gogo/protobuf/gogoproto/gogo.proto""; -+import ""github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto""; -+ -+option (gogoproto.marshaler_all) = true; -+option (gogoproto.unmarshaler_all) = true; -+ -+// DeletePlan holds all the chunks that are supposed to be deleted within an interval(usually a day) -+// This Proto file is used just for storing Delete Plans in proto format. 
-+message DeletePlan { -+ Interval plan_interval = 1; -+ repeated ChunksGroup chunks_group = 2 [(gogoproto.nullable) = false]; -+} -+ -+// ChunksGroup holds ChunkDetails and Labels for a group of chunks which have same series ID -+message ChunksGroup { -+ repeated cortex.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = ""github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter""]; -+ repeated ChunkDetails chunks = 2 [(gogoproto.nullable) = false]; -+} -+ -+message ChunkDetails { -+ string ID = 1; -+ Interval partially_deleted_interval = 2; -+} -+ -+message Interval { -+ int64 start_timestamp_ms = 1; -+ int64 end_timestamp_ms = 2; -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/delete_requests_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go -similarity index 90% -rename from vendor/github.com/cortexproject/cortex/pkg/chunk/delete_requests_store.go -rename to vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go -index 963f1d29d088d..029fa966c4f5d 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/delete_requests_store.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go -@@ -1,4 +1,4 @@ --package chunk -+package purger - - import ( - ""context"" -@@ -12,6 +12,8 @@ import ( - ""strings"" - ""time"" - -+ ""github.com/cortexproject/cortex/pkg/chunk"" -+ - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/pkg/labels"" - ) -@@ -48,7 +50,7 @@ type DeleteRequest struct { - // DeleteStore provides all the methods required to manage lifecycle of delete request and things related to it - type DeleteStore struct { - cfg DeleteStoreConfig -- indexClient IndexClient -+ indexClient chunk.IndexClient - } - - // DeleteStoreConfig holds configuration for delete store -@@ -64,7 +66,7 @@ func (cfg *DeleteStoreConfig) RegisterFlags(f *flag.FlagSet) { - } - - // NewDeleteStore creates a store for managing delete requests --func NewDeleteStore(cfg DeleteStoreConfig, indexClient IndexClient) (*DeleteStore, error) { -+func NewDeleteStore(cfg DeleteStoreConfig, indexClient chunk.IndexClient) (*DeleteStore, error) { - ds := DeleteStore{ - cfg: cfg, - indexClient: indexClient, -@@ -108,19 +110,19 @@ func (ds *DeleteStore) AddDeleteRequest(ctx context.Context, userID string, star - - // GetDeleteRequestsByStatus returns all delete requests for given status - func (ds *DeleteStore) GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { -- return ds.queryDeleteRequests(ctx, []IndexQuery{{TableName: ds.cfg.RequestsTableName, ValueEqual: []byte(status)}}) -+ return ds.queryDeleteRequests(ctx, []chunk.IndexQuery{{TableName: ds.cfg.RequestsTableName, ValueEqual: []byte(status)}}) - } - - // GetDeleteRequestsForUserByStatus returns all delete requests for a user with given status - func (ds *DeleteStore) GetDeleteRequestsForUserByStatus(ctx context.Context, userID string, status DeleteRequestStatus) ([]DeleteRequest, error) { -- return ds.queryDeleteRequests(ctx, []IndexQuery{ -+ return ds.queryDeleteRequests(ctx, []chunk.IndexQuery{ - {TableName: ds.cfg.RequestsTableName, RangeValuePrefix: []byte(userID), ValueEqual: []byte(status)}, - }) - } - - // GetAllDeleteRequestsForUser returns all delete requests for a user - func (ds *DeleteStore) GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { -- return ds.queryDeleteRequests(ctx, []IndexQuery{ 
-+ return ds.queryDeleteRequests(ctx, []chunk.IndexQuery{ - {TableName: ds.cfg.RequestsTableName, RangeValuePrefix: []byte(userID)}, - }) - } -@@ -139,7 +141,7 @@ func (ds *DeleteStore) UpdateStatus(ctx context.Context, userID, requestID strin - func (ds *DeleteStore) GetDeleteRequest(ctx context.Context, userID, requestID string) (*DeleteRequest, error) { - userIDAndRequestID := fmt.Sprintf(""%s:%s"", userID, requestID) - -- deleteRequests, err := ds.queryDeleteRequests(ctx, []IndexQuery{ -+ deleteRequests, err := ds.queryDeleteRequests(ctx, []chunk.IndexQuery{ - {TableName: ds.cfg.RequestsTableName, RangeValuePrefix: []byte(userIDAndRequestID)}, - }) - -@@ -169,9 +171,9 @@ func (ds *DeleteStore) GetPendingDeleteRequestsForUser(ctx context.Context, user - return pendingDeleteRequests, nil - } - --func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery []IndexQuery) ([]DeleteRequest, error) { -+func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery []chunk.IndexQuery) ([]DeleteRequest, error) { - deleteRequests := []DeleteRequest{} -- err := ds.indexClient.QueryPages(ctx, deleteQuery, func(query IndexQuery, batch ReadBatch) (shouldContinue bool) { -+ err := ds.indexClient.QueryPages(ctx, deleteQuery, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - for itr.Next() { - userID, requestID := splitUserIDAndRequestID(string(itr.RangeValue())) -@@ -189,10 +191,10 @@ func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery []In - } - - for i, deleteRequest := range deleteRequests { -- deleteRequestQuery := []IndexQuery{{TableName: ds.cfg.RequestsTableName, HashValue: fmt.Sprintf(""%s:%s"", deleteRequest.UserID, deleteRequest.RequestID)}} -+ deleteRequestQuery := []chunk.IndexQuery{{TableName: ds.cfg.RequestsTableName, HashValue: fmt.Sprintf(""%s:%s"", deleteRequest.UserID, deleteRequest.RequestID)}} - - var parseError error -- err := ds.indexClient.QueryPages(ctx, deleteRequestQuery, func(query IndexQuery, batch ReadBatch) (shouldContinue bool) { -+ err := ds.indexClient.QueryPages(ctx, deleteRequestQuery, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - itr.Next() - -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go -new file mode 100644 -index 0000000000000..b969181f966c5 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go -@@ -0,0 +1,541 @@ -+package purger -+ -+import ( -+ ""bytes"" -+ ""context"" -+ ""flag"" -+ ""fmt"" -+ ""io/ioutil"" -+ ""sync"" -+ ""time"" -+ -+ ""github.com/go-kit/kit/log"" -+ ""github.com/go-kit/kit/log/level"" -+ ""github.com/gogo/protobuf/proto"" -+ ""github.com/prometheus/common/model"" -+ ""github.com/prometheus/prometheus/promql"" -+ ""github.com/weaveworks/common/user"" -+ -+ ""github.com/cortexproject/cortex/pkg/chunk"" -+ ""github.com/cortexproject/cortex/pkg/ingester/client"" -+ ""github.com/cortexproject/cortex/pkg/util"" -+ ""github.com/cortexproject/cortex/pkg/util/services"" -+) -+ -+const millisecondPerDay = int64(24 * time.Hour / time.Millisecond) -+ -+type deleteRequestWithLogger struct { -+ DeleteRequest -+ logger log.Logger // logger is initialized with userID and requestID to add context to every log generated using this -+} -+ -+// Config holds config for DataPurger -+type Config struct { -+ Enable bool `yaml:""enable""` -+ NumWorkers int 
`yaml:""num_workers""` -+ ObjectStoreType string `yaml:""object_store_type""` -+} -+ -+// RegisterFlags registers CLI flags for Config -+func (cfg *Config) RegisterFlags(f *flag.FlagSet) { -+ f.BoolVar(&cfg.Enable, ""purger.enable"", false, ""Enable purger to allow deletion of series. Be aware that Delete series feature is still experimental"") -+ f.IntVar(&cfg.NumWorkers, ""purger.num-workers"", 2, ""Number of workers executing delete plans in parallel"") -+ f.StringVar(&cfg.ObjectStoreType, ""purger.object-store-type"", """", ""Name of the object store to use for storing delete plans"") -+} -+ -+type workerJob struct { -+ planNo int -+ userID string -+ deleteRequestID string -+ logger log.Logger -+} -+ -+// DataPurger does the purging of data which is requested to be deleted -+type DataPurger struct { -+ services.Service -+ -+ cfg Config -+ deleteStore *DeleteStore -+ chunkStore chunk.Store -+ objectClient chunk.ObjectClient -+ -+ executePlansChan chan deleteRequestWithLogger -+ workerJobChan chan workerJob -+ -+ // we would only allow processing of singe delete request at a time since delete requests touching same chunks could change the chunk IDs of partially deleted chunks -+ // and break the purge plan for other requests -+ inProcessRequestIDs map[string]string -+ inProcessRequestIDsMtx sync.RWMutex -+ -+ pendingPlansCount map[string]int // per request pending plan count -+ pendingPlansCountMtx sync.Mutex -+ -+ wg sync.WaitGroup -+} -+ -+// NewDataPurger creates a new DataPurger -+func NewDataPurger(cfg Config, deleteStore *DeleteStore, chunkStore chunk.Store, storageClient chunk.ObjectClient) (*DataPurger, error) { -+ dataPurger := DataPurger{ -+ cfg: cfg, -+ deleteStore: deleteStore, -+ chunkStore: chunkStore, -+ objectClient: storageClient, -+ executePlansChan: make(chan deleteRequestWithLogger, 50), -+ workerJobChan: make(chan workerJob, 50), -+ inProcessRequestIDs: map[string]string{}, -+ pendingPlansCount: map[string]int{}, -+ } -+ -+ dataPurger.Service = services.NewTimerService(time.Hour, dataPurger.init, dataPurger.runOneIteration, dataPurger.stop) -+ return &dataPurger, nil -+} -+ -+// Run keeps pulling delete requests for planning after initializing necessary things -+func (dp *DataPurger) runOneIteration(ctx context.Context) error { -+ err := dp.pullDeleteRequestsToPlanDeletes() -+ if err != nil { -+ level.Error(util.Logger).Log(""msg"", ""error pulling delete requests for building plans"", ""err"", err) -+ } -+ // Don't return error here, or Timer service will stop. -+ return nil -+} -+ -+// init starts workers, scheduler and then loads in process delete requests -+func (dp *DataPurger) init(ctx context.Context) error { -+ for i := 0; i < dp.cfg.NumWorkers; i++ { -+ dp.wg.Add(1) -+ go dp.worker() -+ } -+ -+ dp.wg.Add(1) -+ go dp.jobScheduler(ctx) -+ -+ return dp.loadInprocessDeleteRequests() -+} -+ -+// Stop waits until all background tasks stop. 
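-+// It is used as the stopping callback of the purger's timer service and blocks until the worker and scheduler goroutines tracked by dp.wg have returned.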
-+func (dp *DataPurger) stop(_ error) error { -+ dp.wg.Wait() -+ return nil -+} -+ -+func (dp *DataPurger) workerJobCleanup(job workerJob) { -+ err := dp.removeDeletePlan(context.Background(), job.userID, job.deleteRequestID, job.planNo) -+ if err != nil { -+ level.Error(job.logger).Log(""msg"", ""error removing delete plan"", -+ ""plan_no"", job.planNo, ""err"", err) -+ return -+ } -+ -+ dp.pendingPlansCountMtx.Lock() -+ dp.pendingPlansCount[job.deleteRequestID]-- -+ -+ if dp.pendingPlansCount[job.deleteRequestID] == 0 { -+ level.Info(job.logger).Log(""msg"", ""finished execution of all plans, cleaning up and updating status of request"") -+ -+ err := dp.deleteStore.UpdateStatus(context.Background(), job.userID, job.deleteRequestID, StatusProcessed) -+ if err != nil { -+ level.Error(job.logger).Log(""msg"", ""error updating delete request status to process"", ""err"", err) -+ } -+ -+ delete(dp.pendingPlansCount, job.deleteRequestID) -+ dp.pendingPlansCountMtx.Unlock() -+ -+ dp.inProcessRequestIDsMtx.Lock() -+ delete(dp.inProcessRequestIDs, job.userID) -+ dp.inProcessRequestIDsMtx.Unlock() -+ } else { -+ dp.pendingPlansCountMtx.Unlock() -+ } -+} -+ -+// we send all the delete plans to workerJobChan -+func (dp *DataPurger) jobScheduler(ctx context.Context) { -+ defer dp.wg.Done() -+ -+ for { -+ select { -+ case req := <-dp.executePlansChan: -+ numPlans := numPlans(req.StartTime, req.EndTime) -+ level.Info(req.logger).Log(""msg"", ""sending jobs to workers for purging data"", ""num_jobs"", numPlans) -+ -+ dp.pendingPlansCountMtx.Lock() -+ dp.pendingPlansCount[req.RequestID] = numPlans -+ dp.pendingPlansCountMtx.Unlock() -+ -+ for i := 0; i < numPlans; i++ { -+ dp.workerJobChan <- workerJob{planNo: i, userID: req.UserID, -+ deleteRequestID: req.RequestID, logger: req.logger} -+ } -+ case <-ctx.Done(): -+ close(dp.workerJobChan) -+ return -+ } -+ } -+} -+ -+func (dp *DataPurger) worker() { -+ defer dp.wg.Done() -+ -+ for job := range dp.workerJobChan { -+ err := dp.executePlan(job.userID, job.deleteRequestID, job.planNo, job.logger) -+ if err != nil { -+ level.Error(job.logger).Log(""msg"", ""error executing delete plan"", -+ ""plan_no"", job.planNo, ""err"", err) -+ continue -+ } -+ -+ dp.workerJobCleanup(job) -+ } -+} -+ -+func (dp *DataPurger) executePlan(userID, requestID string, planNo int, logger log.Logger) error { -+ logger = log.With(logger, ""plan_no"", planNo) -+ -+ plan, err := dp.getDeletePlan(context.Background(), userID, requestID, planNo) -+ if err != nil { -+ if err == chunk.ErrStorageObjectNotFound { -+ level.Info(logger).Log(""msg"", ""plan not found, must have been executed already"") -+ // this means plan was already executed and got removed. Do nothing. 
-+ return nil -+ } -+ return err -+ } -+ -+ level.Info(logger).Log(""msg"", ""executing plan"") -+ -+ ctx := user.InjectOrgID(context.Background(), userID) -+ -+ for i := range plan.ChunksGroup { -+ level.Debug(logger).Log(""msg"", ""deleting chunks"", ""labels"", plan.ChunksGroup[i].Labels) -+ -+ for _, chunkDetails := range plan.ChunksGroup[i].Chunks { -+ chunkRef, err := chunk.ParseExternalKey(userID, chunkDetails.ID) -+ if err != nil { -+ return err -+ } -+ -+ var partiallyDeletedInterval *model.Interval = nil -+ if chunkDetails.PartiallyDeletedInterval != nil { -+ partiallyDeletedInterval = &model.Interval{ -+ Start: model.Time(chunkDetails.PartiallyDeletedInterval.StartTimestampMs), -+ End: model.Time(chunkDetails.PartiallyDeletedInterval.EndTimestampMs), -+ } -+ } -+ -+ err = dp.chunkStore.DeleteChunk(ctx, chunkRef.From, chunkRef.Through, chunkRef.UserID, -+ chunkDetails.ID, client.FromLabelAdaptersToLabels(plan.ChunksGroup[i].Labels), partiallyDeletedInterval) -+ if err != nil { -+ if isMissingChunkErr(err) { -+ level.Error(logger).Log(""msg"", ""chunk not found for deletion. We may have already deleted it"", -+ ""chunk_id"", chunkDetails.ID) -+ continue -+ } -+ return err -+ } -+ } -+ -+ level.Debug(logger).Log(""msg"", ""deleting series"", ""labels"", plan.ChunksGroup[i].Labels) -+ -+ // this is mostly required to clean up series ids from series store -+ err := dp.chunkStore.DeleteSeriesIDs(ctx, model.Time(plan.PlanInterval.StartTimestampMs), model.Time(plan.PlanInterval.EndTimestampMs), -+ userID, client.FromLabelAdaptersToLabels(plan.ChunksGroup[i].Labels)) -+ if err != nil { -+ return err -+ } -+ } -+ -+ level.Info(logger).Log(""msg"", ""finished execution of plan"") -+ -+ return nil -+} -+ -+// we need to load all in process delete requests on startup to finish them first -+func (dp *DataPurger) loadInprocessDeleteRequests() error { -+ requestsWithBuildingPlanStatus, err := dp.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusBuildingPlan) -+ if err != nil { -+ return err -+ } -+ -+ for _, deleteRequest := range requestsWithBuildingPlanStatus { -+ req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) -+ -+ level.Info(req.logger).Log(""msg"", ""loaded in process delete requests with status building plan"") -+ -+ dp.inProcessRequestIDs[deleteRequest.UserID] = deleteRequest.RequestID -+ err := dp.buildDeletePlan(req) -+ if err != nil { -+ level.Error(req.logger).Log(""msg"", ""error building delete plan"", ""err"", err) -+ } -+ -+ level.Info(req.logger).Log(""msg"", ""sending delete request for execution"") -+ dp.executePlansChan <- req -+ } -+ -+ requestsWithDeletingStatus, err := dp.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusDeleting) -+ if err != nil { -+ return err -+ } -+ -+ for _, deleteRequest := range requestsWithDeletingStatus { -+ req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) -+ level.Info(req.logger).Log(""msg"", ""loaded in process delete requests with status deleting"") -+ -+ dp.inProcessRequestIDs[deleteRequest.UserID] = deleteRequest.RequestID -+ dp.executePlansChan <- req -+ } -+ -+ return nil -+} -+ -+// pullDeleteRequestsToPlanDeletes pulls delete requests which do not have their delete plans built yet and sends them for building delete plans -+// after pulling delete requests for building plans, it updates its status to StatusBuildingPlan status to avoid picking this up again next time -+func (dp *DataPurger) pullDeleteRequestsToPlanDeletes() error { -+ deleteRequests, err := 
dp.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived) -+ if err != nil { -+ return err -+ } -+ -+ for _, deleteRequest := range deleteRequests { -+ if deleteRequest.CreatedAt.Add(24 * time.Hour).After(model.Now()) { -+ continue -+ } -+ -+ dp.inProcessRequestIDsMtx.RLock() -+ inprocessDeleteRequstID := dp.inProcessRequestIDs[deleteRequest.UserID] -+ dp.inProcessRequestIDsMtx.RUnlock() -+ -+ if inprocessDeleteRequstID != """" { -+ level.Debug(util.Logger).Log(""msg"", ""skipping delete request processing for now since another request from same user is already in process"", -+ ""inprocess_request_id"", inprocessDeleteRequstID, -+ ""skipped_request_id"", deleteRequest.RequestID, ""user_id"", deleteRequest.UserID) -+ continue -+ } -+ -+ err = dp.deleteStore.UpdateStatus(context.Background(), deleteRequest.UserID, deleteRequest.RequestID, StatusBuildingPlan) -+ if err != nil { -+ return err -+ } -+ -+ dp.inProcessRequestIDsMtx.Lock() -+ dp.inProcessRequestIDs[deleteRequest.UserID] = deleteRequest.RequestID -+ dp.inProcessRequestIDsMtx.Unlock() -+ -+ req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) -+ -+ level.Info(req.logger).Log(""msg"", ""building plan for a new delete request"") -+ -+ err := dp.buildDeletePlan(req) -+ if err != nil { -+ // We do not want to remove this delete request from inProcessRequestIDs to make sure -+ // we do not move multiple deleting requests in deletion process. -+ // None of the other delete requests from the user would be considered for processing until then. -+ level.Error(req.logger).Log(""msg"", ""error building delete plan"", ""err"", err) -+ return err -+ } -+ -+ level.Info(req.logger).Log(""msg"", ""sending delete request for execution"") -+ dp.executePlansChan <- req -+ } -+ -+ return nil -+} -+ -+// buildDeletePlan builds per day delete plan for given delete requests. -+// A days plan will include chunk ids and labels of all the chunks which are supposed to be deleted. -+// Chunks are grouped together by labels to avoid storing labels repetitively. -+// After building delete plans it updates status of delete request to StatusDeleting and sends it for execution -+func (dp *DataPurger) buildDeletePlan(req deleteRequestWithLogger) error { -+ ctx := context.Background() -+ ctx = user.InjectOrgID(ctx, req.UserID) -+ -+ perDayTimeRange := splitByDay(req.StartTime, req.EndTime) -+ level.Info(req.logger).Log(""msg"", ""building delete plan"", ""num_plans"", len(perDayTimeRange)) -+ -+ plans := make([][]byte, len(perDayTimeRange)) -+ for i, planRange := range perDayTimeRange { -+ chunksGroups := []ChunksGroup{} -+ -+ for _, selector := range req.Selectors { -+ matchers, err := promql.ParseMetricSelector(selector) -+ if err != nil { -+ return err -+ } -+ -+ // ToDo: remove duplicate chunks -+ chunks, err := dp.chunkStore.Get(ctx, req.UserID, planRange.Start, planRange.End, matchers...) -+ if err != nil { -+ return err -+ } -+ -+ chunksGroups = append(chunksGroups, groupChunks(chunks, req.StartTime, req.EndTime)...) 
-+ } -+ -+ plan := DeletePlan{ -+ PlanInterval: &Interval{ -+ StartTimestampMs: int64(planRange.Start), -+ EndTimestampMs: int64(planRange.End), -+ }, -+ ChunksGroup: chunksGroups, -+ } -+ -+ pb, err := proto.Marshal(&plan) -+ if err != nil { -+ return err -+ } -+ -+ plans[i] = pb -+ } -+ -+ err := dp.putDeletePlans(ctx, req.UserID, req.RequestID, plans) -+ if err != nil { -+ return err -+ } -+ -+ err = dp.deleteStore.UpdateStatus(ctx, req.UserID, req.RequestID, StatusDeleting) -+ if err != nil { -+ return err -+ } -+ -+ level.Info(req.logger).Log(""msg"", ""built delete plans"", ""num_plans"", len(perDayTimeRange)) -+ -+ return nil -+} -+ -+func (dp *DataPurger) putDeletePlans(ctx context.Context, userID, requestID string, plans [][]byte) error { -+ for i, plan := range plans { -+ objectKey := buildObjectKeyForPlan(userID, requestID, i) -+ -+ err := dp.objectClient.PutObject(ctx, objectKey, bytes.NewReader(plan)) -+ if err != nil { -+ return err -+ } -+ } -+ -+ return nil -+} -+ -+func (dp *DataPurger) getDeletePlan(ctx context.Context, userID, requestID string, planNo int) (*DeletePlan, error) { -+ objectKey := buildObjectKeyForPlan(userID, requestID, planNo) -+ -+ readCloser, err := dp.objectClient.GetObject(ctx, objectKey) -+ if err != nil { -+ return nil, err -+ } -+ -+ defer readCloser.Close() -+ -+ buf, err := ioutil.ReadAll(readCloser) -+ if err != nil { -+ return nil, err -+ } -+ -+ var plan DeletePlan -+ err = proto.Unmarshal(buf, &plan) -+ if err != nil { -+ return nil, err -+ } -+ -+ return &plan, nil -+} -+ -+func (dp *DataPurger) removeDeletePlan(ctx context.Context, userID, requestID string, planNo int) error { -+ objectKey := buildObjectKeyForPlan(userID, requestID, planNo) -+ return dp.objectClient.DeleteObject(ctx, objectKey) -+} -+ -+// returns interval per plan -+func splitByDay(start, end model.Time) []model.Interval { -+ numOfDays := numPlans(start, end) -+ -+ perDayTimeRange := make([]model.Interval, numOfDays) -+ startOfNextDay := model.Time(((int64(start) / millisecondPerDay) + 1) * millisecondPerDay) -+ perDayTimeRange[0] = model.Interval{Start: start, End: startOfNextDay - 1} -+ -+ for i := 1; i < numOfDays; i++ { -+ interval := model.Interval{Start: startOfNextDay} -+ startOfNextDay += model.Time(millisecondPerDay) -+ interval.End = startOfNextDay - 1 -+ perDayTimeRange[i] = interval -+ } -+ -+ perDayTimeRange[numOfDays-1].End = end -+ -+ return perDayTimeRange -+} -+ -+func numPlans(start, end model.Time) int { -+ // rounding down start to start of the day -+ if start%model.Time(millisecondPerDay) != 0 { -+ start = model.Time((int64(start) / millisecondPerDay) * millisecondPerDay) -+ } -+ -+ // rounding up end to end of the day -+ if end%model.Time(millisecondPerDay) != 0 { -+ end = model.Time((int64(end)/millisecondPerDay)*millisecondPerDay + millisecondPerDay) -+ } -+ -+ return int(int64(end-start) / millisecondPerDay) -+} -+ -+// groups chunks together by unique label sets i.e all the chunks with same labels would be stored in a group -+// chunk details are stored in groups for each unique label set to avoid storing them repetitively for each chunk -+func groupChunks(chunks []chunk.Chunk, deleteFrom, deleteThrough model.Time) []ChunksGroup { -+ metricToChunks := make(map[string]ChunksGroup) -+ -+ for _, chk := range chunks { -+ // chunk.Metric are assumed to be sorted which should give same value from String() for same series. 
-+ // If they stop being sorted then in the worst case we would lose the benefit of grouping chunks to avoid storing labels repetitively. -+ metricString := chk.Metric.String() -+ group, ok := metricToChunks[metricString] -+ if !ok { -+ group = ChunksGroup{Labels: client.FromLabelsToLabelAdapters(chk.Metric)} -+ } -+ -+ chunkDetails := ChunkDetails{ID: chk.ExternalKey()} -+ -+ if deleteFrom > chk.From || deleteThrough < chk.Through { -+ partiallyDeletedInterval := Interval{StartTimestampMs: int64(chk.From), EndTimestampMs: int64(chk.Through)} -+ -+ if deleteFrom > chk.From { -+ partiallyDeletedInterval.StartTimestampMs = int64(deleteFrom) -+ } -+ -+ if deleteThrough < chk.Through { -+ partiallyDeletedInterval.EndTimestampMs = int64(deleteThrough) -+ } -+ chunkDetails.PartiallyDeletedInterval = &partiallyDeletedInterval -+ } -+ -+ group.Chunks = append(group.Chunks, chunkDetails) -+ metricToChunks[metricString] = group -+ } -+ -+ chunksGroups := make([]ChunksGroup, 0, len(metricToChunks)) -+ -+ for _, group := range metricToChunks { -+ chunksGroups = append(chunksGroups, group) -+ } -+ -+ return chunksGroups -+} -+ -+func isMissingChunkErr(err error) bool { -+ if err == chunk.ErrStorageObjectNotFound { -+ return true -+ } -+ if promqlStorageErr, ok := err.(promql.ErrStorage); ok && promqlStorageErr.Err == chunk.ErrStorageObjectNotFound { -+ return true -+ } -+ -+ return false -+} -+ -+func buildObjectKeyForPlan(userID, requestID string, planNo int) string { -+ return fmt.Sprintf(""%s:%s/%d"", userID, requestID, planNo) -+} -+ -+func makeDeleteRequestWithLogger(deleteRequest DeleteRequest, l log.Logger) deleteRequestWithLogger { -+ logger := log.With(l, ""user_id"", deleteRequest.UserID, ""request_id"", deleteRequest.RequestID) -+ return deleteRequestWithLogger{deleteRequest, logger} -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go -new file mode 100644 -index 0000000000000..30a4035e3c2e7 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go -@@ -0,0 +1,107 @@ -+package purger -+ -+import ( -+ ""encoding/json"" -+ ""fmt"" -+ ""net/http"" -+ -+ ""github.com/cortexproject/cortex/pkg/util"" -+ -+ ""github.com/prometheus/common/model"" -+ ""github.com/prometheus/prometheus/promql"" -+ ""github.com/weaveworks/common/user"" -+) -+ -+// DeleteRequestHandler provides handlers for delete requests -+type DeleteRequestHandler struct { -+ deleteStore *DeleteStore -+} -+ -+// NewDeleteRequestHandler creates a DeleteRequestHandler -+func NewDeleteRequestHandler(deleteStore *DeleteStore) (*DeleteRequestHandler, error) { -+ deleteMgr := DeleteRequestHandler{ -+ deleteStore: deleteStore, -+ } -+ -+ return &deleteMgr, nil -+} -+ -+// AddDeleteRequestHandler handles addition of new delete request -+func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r *http.Request) { -+ ctx := r.Context() -+ userID, err := user.ExtractOrgID(ctx) -+ if err != nil { -+ http.Error(w, err.Error(), http.StatusBadRequest) -+ return -+ } -+ -+ params := r.URL.Query() -+ match := params[""match[]""] -+ if len(match) == 0 { -+ http.Error(w, ""selectors not set"", http.StatusBadRequest) -+ return -+ } -+ -+ for i := range match { -+ _, err := promql.ParseMetricSelector(match[i]) -+ if err != nil { -+ http.Error(w, err.Error(), http.StatusBadRequest) -+ return -+ } -+ } -+ -+ startParam := params.Get(""start"") -+ startTime := int64(0) -+ if 
startParam != """" { -+ startTime, err = util.ParseTime(startParam) -+ if err != nil { -+ http.Error(w, err.Error(), http.StatusBadRequest) -+ return -+ } -+ } -+ -+ endParam := params.Get(""end"") -+ endTime := int64(model.Now()) -+ -+ if endParam != """" { -+ endTime, err = util.ParseTime(endParam) -+ if err != nil { -+ http.Error(w, err.Error(), http.StatusBadRequest) -+ return -+ } -+ -+ if endTime > int64(model.Now()) { -+ http.Error(w, ""deletes in future not allowed"", http.StatusBadRequest) -+ return -+ } -+ } -+ -+ if startTime > endTime { -+ http.Error(w, ""start time can't be greater than end time"", http.StatusBadRequest) -+ return -+ } -+ -+ if err := dm.deleteStore.AddDeleteRequest(ctx, userID, model.Time(startTime), model.Time(endTime), match); err != nil { -+ http.Error(w, err.Error(), http.StatusInternalServerError) -+ } -+} -+ -+// GetAllDeleteRequestsHandler handles get all delete requests -+func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWriter, r *http.Request) { -+ ctx := r.Context() -+ userID, err := user.ExtractOrgID(ctx) -+ if err != nil { -+ http.Error(w, err.Error(), http.StatusBadRequest) -+ return -+ } -+ -+ deleteRequests, err := dm.deleteStore.GetAllDeleteRequestsForUser(ctx, userID) -+ if err != nil { -+ http.Error(w, err.Error(), http.StatusInternalServerError) -+ return -+ } -+ -+ if err := json.NewEncoder(w).Encode(deleteRequests); err != nil { -+ http.Error(w, fmt.Sprintf(""Error marshalling response: %v"", err), http.StatusInternalServerError) -+ } -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go -new file mode 100644 -index 0000000000000..6741d0d40bfaf ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go -@@ -0,0 +1,293 @@ -+package purger -+ -+import ( -+ ""context"" -+ ""sort"" -+ ""sync"" -+ ""time"" -+ -+ ""github.com/go-kit/kit/log/level"" -+ ""github.com/prometheus/common/model"" -+ ""github.com/prometheus/prometheus/pkg/labels"" -+ ""github.com/prometheus/prometheus/promql"" -+ -+ ""github.com/cortexproject/cortex/pkg/util"" -+) -+ -+const tombstonesReloadDuration = 5 * time.Minute -+ -+// TombstonesSet holds all the pending delete requests for a user -+type TombstonesSet struct { -+ tombstones []DeleteRequest -+ oldestTombstoneStart, newestTombstoneEnd model.Time // Used as optimization to find whether we want to iterate over tombstones or not -+} -+ -+// TombstonesLoader loads delete requests and gen numbers from store and keeps checking for updates. -+// It keeps checking for changes in gen numbers, which also means changes in delete requests and reloads specific users delete requests. 
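-+// Loaded tombstones are cached per user and refreshed every tombstonesReloadDuration.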
-+type TombstonesLoader struct { -+ tombstones map[string]*TombstonesSet -+ tombstonesMtx sync.RWMutex -+ -+ deleteStore *DeleteStore -+ quit chan struct{} -+} -+ -+// NewTombstonesLoader creates a TombstonesLoader -+func NewTombstonesLoader(deleteStore *DeleteStore) *TombstonesLoader { -+ tl := TombstonesLoader{ -+ tombstones: map[string]*TombstonesSet{}, -+ deleteStore: deleteStore, -+ } -+ go tl.loop() -+ -+ return &tl -+} -+ -+// Stop stops TombstonesLoader -+func (tl *TombstonesLoader) Stop() { -+ close(tl.quit) -+} -+ -+func (tl *TombstonesLoader) loop() { -+ tombstonesReloadTimer := time.NewTicker(tombstonesReloadDuration) -+ for { -+ select { -+ case <-tombstonesReloadTimer.C: -+ err := tl.reloadTombstones() -+ if err != nil { -+ level.Error(util.Logger).Log(""msg"", ""error reloading tombstones"", ""err"", err) -+ } -+ case <-tl.quit: -+ return -+ } -+ } -+} -+ -+func (tl *TombstonesLoader) reloadTombstones() error { -+ // check for updates in loaded gen numbers -+ tl.tombstonesMtx.Lock() -+ -+ userIDs := make([]string, 0, len(tl.tombstones)) -+ for userID := range tl.tombstones { -+ userIDs = append(userIDs, userID) -+ } -+ -+ tl.tombstonesMtx.Unlock() -+ -+ // for all the updated gen numbers, reload delete requests -+ for _, userID := range userIDs { -+ err := tl.loadPendingTombstones(userID) -+ if err != nil { -+ return err -+ } -+ } -+ -+ return nil -+} -+ -+// GetPendingTombstones returns all pending tombstones -+func (tl *TombstonesLoader) GetPendingTombstones(userID string) (*TombstonesSet, error) { -+ tl.tombstonesMtx.RLock() -+ -+ tombstoneSet, isOK := tl.tombstones[userID] -+ if isOK { -+ tl.tombstonesMtx.RUnlock() -+ return tombstoneSet, nil -+ } -+ -+ tl.tombstonesMtx.RUnlock() -+ err := tl.loadPendingTombstones(userID) -+ if err != nil { -+ return nil, err -+ } -+ -+ tl.tombstonesMtx.RLock() -+ defer tl.tombstonesMtx.RUnlock() -+ -+ return tl.tombstones[userID], nil -+} -+ -+// GetPendingTombstones returns all pending tombstones -+func (tl *TombstonesLoader) GetPendingTombstonesForInterval(userID string, from, to model.Time) (*TombstonesSet, error) { -+ allTombstones, err := tl.GetPendingTombstones(userID) -+ if err != nil { -+ return nil, err -+ } -+ -+ if !allTombstones.HasTombstonesForInterval(from, to) { -+ return &TombstonesSet{}, nil -+ } -+ -+ filteredSet := TombstonesSet{oldestTombstoneStart: model.Now()} -+ -+ for _, tombstone := range allTombstones.tombstones { -+ if !intervalsOverlap(model.Interval{Start: from, End: to}, model.Interval{Start: tombstone.StartTime, End: tombstone.EndTime}) { -+ continue -+ } -+ -+ filteredSet.tombstones = append(filteredSet.tombstones, tombstone) -+ -+ if tombstone.StartTime < filteredSet.oldestTombstoneStart { -+ filteredSet.oldestTombstoneStart = tombstone.StartTime -+ } -+ -+ if tombstone.EndTime > filteredSet.newestTombstoneEnd { -+ filteredSet.newestTombstoneEnd = tombstone.EndTime -+ } -+ } -+ -+ return &filteredSet, nil -+} -+ -+func (tl *TombstonesLoader) loadPendingTombstones(userID string) error { -+ if tl.deleteStore == nil { -+ tl.tombstonesMtx.Lock() -+ defer tl.tombstonesMtx.Unlock() -+ -+ tl.tombstones[userID] = &TombstonesSet{oldestTombstoneStart: 0, newestTombstoneEnd: 0} -+ return nil -+ } -+ -+ pendingDeleteRequests, err := tl.deleteStore.GetPendingDeleteRequestsForUser(context.Background(), userID) -+ if err != nil { -+ return err -+ } -+ -+ tombstoneSet := TombstonesSet{tombstones: pendingDeleteRequests, oldestTombstoneStart: model.Now()} -+ for i := range tombstoneSet.tombstones { -+ 
tombstoneSet.tombstones[i].Matchers = make([][]*labels.Matcher, len(tombstoneSet.tombstones[i].Selectors)) -+ -+ for j, selector := range tombstoneSet.tombstones[i].Selectors { -+ tombstoneSet.tombstones[i].Matchers[j], err = promql.ParseMetricSelector(selector) -+ -+ if err != nil { -+ return err -+ } -+ } -+ -+ if tombstoneSet.tombstones[i].StartTime < tombstoneSet.oldestTombstoneStart { -+ tombstoneSet.oldestTombstoneStart = tombstoneSet.tombstones[i].StartTime -+ } -+ -+ if tombstoneSet.tombstones[i].EndTime > tombstoneSet.newestTombstoneEnd { -+ tombstoneSet.newestTombstoneEnd = tombstoneSet.tombstones[i].EndTime -+ } -+ } -+ -+ tl.tombstonesMtx.Lock() -+ defer tl.tombstonesMtx.Unlock() -+ tl.tombstones[userID] = &tombstoneSet -+ -+ return nil -+} -+ -+// GetDeletedIntervals returns non-overlapping, sorted deleted intervals. -+func (ts TombstonesSet) GetDeletedIntervals(lbls labels.Labels, from, to model.Time) []model.Interval { -+ if len(ts.tombstones) == 0 || to < ts.oldestTombstoneStart || from > ts.newestTombstoneEnd { -+ return nil -+ } -+ -+ var deletedIntervals []model.Interval -+ requestedInterval := model.Interval{Start: from, End: to} -+ -+ for i := range ts.tombstones { -+ overlaps, overlappingInterval := getOverlappingInterval(requestedInterval, -+ model.Interval{Start: ts.tombstones[i].StartTime, End: ts.tombstones[i].EndTime}) -+ -+ if !overlaps { -+ continue -+ } -+ -+ matches := false -+ for _, matchers := range ts.tombstones[i].Matchers { -+ if labels.Selector(matchers).Matches(lbls) { -+ matches = true -+ break -+ } -+ } -+ -+ if !matches { -+ continue -+ } -+ -+ if overlappingInterval == requestedInterval { -+ // whole interval deleted -+ return []model.Interval{requestedInterval} -+ } -+ -+ deletedIntervals = append(deletedIntervals, overlappingInterval) -+ } -+ -+ if len(deletedIntervals) == 0 { -+ return nil -+ } -+ -+ return mergeIntervals(deletedIntervals) -+} -+ -+// Len returns number of tombstones that are there -+func (ts TombstonesSet) Len() int { -+ return len(ts.tombstones) -+} -+ -+// HasTombstonesForInterval tells whether there are any tombstones which overlapping given interval -+func (ts TombstonesSet) HasTombstonesForInterval(from, to model.Time) bool { -+ if len(ts.tombstones) == 0 || to < ts.oldestTombstoneStart || from > ts.newestTombstoneEnd { -+ return false -+ } -+ -+ return true -+} -+ -+// sorts and merges overlapping intervals -+func mergeIntervals(intervals []model.Interval) []model.Interval { -+ if len(intervals) <= 1 { -+ return intervals -+ } -+ -+ mergedIntervals := make([]model.Interval, 0, len(intervals)) -+ sort.Slice(intervals, func(i, j int) bool { -+ return intervals[i].Start < intervals[j].Start -+ }) -+ -+ ongoingTrFrom, ongoingTrTo := intervals[0].Start, intervals[0].End -+ for i := 1; i < len(intervals); i++ { -+ // if there is no overlap add it to mergedIntervals -+ if intervals[i].Start > ongoingTrTo { -+ mergedIntervals = append(mergedIntervals, model.Interval{Start: ongoingTrFrom, End: ongoingTrTo}) -+ ongoingTrFrom = intervals[i].Start -+ ongoingTrTo = intervals[i].End -+ continue -+ } -+ -+ // there is an overlap but check whether existing time range is bigger than the current one -+ if intervals[i].End > ongoingTrTo { -+ ongoingTrTo = intervals[i].End -+ } -+ } -+ -+ // add the last time range -+ mergedIntervals = append(mergedIntervals, model.Interval{Start: ongoingTrFrom, End: ongoingTrTo}) -+ -+ return mergedIntervals -+} -+ -+func getOverlappingInterval(interval1, interval2 model.Interval) (bool, model.Interval) { 
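-+ // shrink interval1 to the part that overlaps interval2 and report whether any overlap remains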
-+ if interval2.Start > interval1.Start { -+ interval1.Start = interval2.Start -+ } -+ -+ if interval2.End < interval1.End { -+ interval1.End = interval2.End -+ } -+ -+ return interval1.Start < interval1.End, interval1 -+} -+ -+func intervalsOverlap(interval1, interval2 model.Interval) bool { -+ if interval1.Start > interval2.End || interval2.Start > interval1.End { -+ return false -+ } -+ -+ return true -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go -index ab4cd4247ab8e..d98f361cb99a3 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go -@@ -290,9 +290,42 @@ func (cfg *PeriodConfig) dailyBuckets(from, through model.Time, userID string) [ - - // PeriodicTableConfig is configuration for a set of time-sharded tables. - type PeriodicTableConfig struct { -- Prefix string `yaml:""prefix""` -- Period time.Duration `yaml:""period,omitempty""` -- Tags Tags `yaml:""tags,omitempty""` -+ Prefix string -+ Period time.Duration -+ Tags Tags -+} -+ -+// UnmarshalYAML implements the yaml.Unmarshaler interface. -+func (cfg *PeriodicTableConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { -+ g := struct { -+ Prefix string `yaml:""prefix""` -+ Period model.Duration `yaml:""period""` -+ Tags Tags `yaml:""tags""` -+ }{} -+ if err := unmarshal(&g); err != nil { -+ return err -+ } -+ -+ cfg.Prefix = g.Prefix -+ cfg.Period = time.Duration(g.Period) -+ cfg.Tags = g.Tags -+ -+ return nil -+} -+ -+// MarshalYAML implements the yaml.Marshaler interface. -+func (cfg PeriodicTableConfig) MarshalYAML() (interface{}, error) { -+ g := &struct { -+ Prefix string `yaml:""prefix""` -+ Period model.Duration `yaml:""period""` -+ Tags Tags `yaml:""tags""` -+ }{ -+ Prefix: cfg.Prefix, -+ Period: model.Duration(cfg.Period), -+ Tags: cfg.Tags, -+ } -+ -+ return g, nil - } - - // AutoScalingConfig for DynamoDB tables. -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go -index b358b2f38f4d6..e46fffee3d661 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go -@@ -18,6 +18,7 @@ import ( - ""github.com/cortexproject/cortex/pkg/chunk/gcp"" - ""github.com/cortexproject/cortex/pkg/chunk/local"" - ""github.com/cortexproject/cortex/pkg/chunk/objectclient"" -+ ""github.com/cortexproject/cortex/pkg/chunk/purger"" - ""github.com/cortexproject/cortex/pkg/util"" - ) - -@@ -62,7 +63,7 @@ type Config struct { - - IndexQueriesCacheConfig cache.Config `yaml:""index_queries_cache_config,omitempty""` - -- DeleteStoreConfig chunk.DeleteStoreConfig `yaml:""delete_store,omitempty""` -+ DeleteStoreConfig purger.DeleteStoreConfig `yaml:""delete_store,omitempty""` - } - - // RegisterFlags adds the flags required to configure this flag set. 
-diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go -index 4cc65ad239d89..baee41e74bbed 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go -@@ -29,10 +29,11 @@ const ( - ) - - type tableManagerMetrics struct { -- syncTableDuration *prometheus.HistogramVec -- tableCapacity *prometheus.GaugeVec -- createFailures prometheus.Gauge -- deleteFailures prometheus.Gauge -+ syncTableDuration *prometheus.HistogramVec -+ tableCapacity *prometheus.GaugeVec -+ createFailures prometheus.Gauge -+ deleteFailures prometheus.Gauge -+ lastSuccessfulSync prometheus.Gauge - } - - func newTableManagerMetrics(r prometheus.Registerer) *tableManagerMetrics { -@@ -61,12 +62,19 @@ func newTableManagerMetrics(r prometheus.Registerer) *tableManagerMetrics { - Help: ""Number of table deletion failures during the last table-manager reconciliation"", - }) - -+ m.lastSuccessfulSync = prometheus.NewGauge(prometheus.GaugeOpts{ -+ Namespace: ""cortex"", -+ Name: ""table_manager_sync_success_timestamp_seconds"", -+ Help: ""Timestamp of the last successful table manager sync."", -+ }) -+ - if r != nil { - r.MustRegister( - m.syncTableDuration, - m.tableCapacity, - m.createFailures, - m.deleteFailures, -+ m.lastSuccessfulSync, - ) - } - -@@ -82,7 +90,9 @@ type TableManagerConfig struct { - RetentionDeletesEnabled bool `yaml:""retention_deletes_enabled""` - - // How far back tables will be kept before they are deleted -- RetentionPeriod time.Duration `yaml:""retention_period""` -+ RetentionPeriod time.Duration `yaml:""-""` -+ // This is so that we can accept 1w, 1y in the YAML. -+ RetentionPeriodModel model.Duration `yaml:""retention_period""` - - // Period with which the table manager will poll for tables. - DynamoDBPollInterval time.Duration `yaml:""dynamodb_poll_interval""` -@@ -94,6 +104,41 @@ type TableManagerConfig struct { - ChunkTables ProvisionConfig `yaml:""chunk_tables_provisioning""` - } - -+// UnmarshalYAML implements the yaml.Unmarshaler interface. To support RetentionPeriod. -+func (cfg *TableManagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { -+ -+ // If we call unmarshal on TableManagerConfig, it will call UnmarshalYAML leading to infinite recursion. -+ // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML -+ // again, we have to hide it using a type indirection. -+ type plain TableManagerConfig -+ if err := unmarshal((*plain)(cfg)); err != nil { -+ return err -+ } -+ -+ if cfg.RetentionPeriodModel > 0 { -+ cfg.RetentionPeriod = time.Duration(cfg.RetentionPeriodModel) -+ } -+ -+ return nil -+} -+ -+// MarshalYAML implements the yaml.Marshaler interface. To support RetentionPeriod. -+func (cfg *TableManagerConfig) MarshalYAML() (interface{}, error) { -+ cfg.RetentionPeriodModel = model.Duration(cfg.RetentionPeriod) -+ return cfg, nil -+} -+ -+// Validate validates the config. -+func (cfg *TableManagerConfig) Validate() error { -+ // We're setting this field because when using flags, you set the RetentionPeriodModel but not RetentionPeriod. 
-+ // TODO(gouthamve): Its a hack, but I can't think of any other way :/ -+ if cfg.RetentionPeriodModel > 0 { -+ cfg.RetentionPeriod = time.Duration(cfg.RetentionPeriodModel) -+ } -+ -+ return nil -+} -+ - // ProvisionConfig holds config for provisioning capacity (on DynamoDB) - type ProvisionConfig struct { - ProvisionedThroughputOnDemandMode bool `yaml:""provisioned_throughput_on_demand_mode""` -@@ -115,7 +160,7 @@ type ProvisionConfig struct { - func (cfg *TableManagerConfig) RegisterFlags(f *flag.FlagSet) { - f.BoolVar(&cfg.ThroughputUpdatesDisabled, ""table-manager.throughput-updates-disabled"", false, ""If true, disable all changes to DB capacity"") - f.BoolVar(&cfg.RetentionDeletesEnabled, ""table-manager.retention-deletes-enabled"", false, ""If true, enables retention deletes of DB tables"") -- f.DurationVar(&cfg.RetentionPeriod, ""table-manager.retention-period"", 0, ""Tables older than this retention period are deleted. Note: This setting is destructive to data!(default: 0, which disables deletion)"") -+ f.Var(&cfg.RetentionPeriodModel, ""table-manager.retention-period"", ""Tables older than this retention period are deleted. Note: This setting is destructive to data!(default: 0, which disables deletion)"") - f.DurationVar(&cfg.DynamoDBPollInterval, ""dynamodb.poll-interval"", 2*time.Minute, ""How frequently to poll DynamoDB to learn our capacity."") - f.DurationVar(&cfg.CreationGracePeriod, ""dynamodb.periodic-table.grace-period"", 10*time.Minute, ""DynamoDB periodic tables grace period (duration which table will be created/deleted before/after it's needed)."") - -@@ -259,7 +304,12 @@ func (m *TableManager) SyncTables(ctx context.Context) error { - return err - } - -- return m.updateTables(ctx, toCheckThroughput) -+ if err := m.updateTables(ctx, toCheckThroughput); err != nil { -+ return err -+ } -+ -+ m.metrics.lastSuccessfulSync.SetToCurrentTime() -+ return nil - } - - func (m *TableManager) calculateExpectedTables() []TableDesc { -diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go -index a046cced72341..92dd0dcaf4594 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go -@@ -143,22 +143,6 @@ func SetupTestChunkStore() (chunk.Store, error) { - return store, nil - } - --func SetupTestDeleteStore() (*chunk.DeleteStore, error) { -- var deleteStoreConfig chunk.DeleteStoreConfig -- flagext.DefaultValues(&deleteStoreConfig) -- -- mockStorage := chunk.NewMockStorage() -- -- err := mockStorage.CreateTable(context.Background(), chunk.TableDesc{ -- Name: deleteStoreConfig.RequestsTableName, -- }) -- if err != nil { -- return nil, err -- } -- -- return chunk.NewDeleteStore(deleteStoreConfig, mockStorage) --} -- - func SetupTestObjectStore() (chunk.ObjectClient, error) { - return chunk.NewMockStorage(), nil - } -diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go -index aaaf8f73fcf6f..45e3cb1dffc1d 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go -@@ -9,6 +9,7 @@ import ( - ""time"" - - opentracing ""github.com/opentracing/opentracing-go"" -+ ""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" - 
""github.com/prometheus/client_golang/prometheus/promauto"" - ""github.com/prometheus/common/model"" -@@ -112,7 +113,8 @@ type Distributor struct { - ingestionRateLimiter *limiter.RateLimiter - - // Manager for subservices (HA Tracker, distributor ring and client pool) -- subservices *services.Manager -+ subservices *services.Manager -+ subservicesWatcher *services.FailureWatcher - } - - // Config contains the configuration require to -@@ -208,8 +210,10 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove - if err != nil { - return nil, err - } -+ d.subservicesWatcher = services.NewFailureWatcher() -+ d.subservicesWatcher.WatchManager(d.subservices) - -- d.Service = services.NewIdleService(d.starting, d.stopping) -+ d.Service = services.NewBasicService(d.starting, d.running, d.stopping) - return d, nil - } - -@@ -218,6 +222,15 @@ func (d *Distributor) starting(ctx context.Context) error { - return services.StartManagerAndAwaitHealthy(ctx, d.subservices) - } - -+func (d *Distributor) running(ctx context.Context) error { -+ select { -+ case <-ctx.Done(): -+ return nil -+ case err := <-d.subservicesWatcher.Chan(): -+ return errors.Wrap(err, ""distributor subservice failed"") -+ } -+} -+ - // Called after distributor is asked to stop via StopAsync. - func (d *Distributor) stopping(_ error) error { - return services.StopManagerAndAwaitStopped(context.Background(), d.subservices) -diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go -index e10cc11158e50..76e81fb564cf3 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go -@@ -3,7 +3,6 @@ package distributor - import ( - ""context"" - ""io"" -- ""sort"" - - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/pkg/labels"" -@@ -16,6 +15,7 @@ import ( - ""github.com/cortexproject/cortex/pkg/ring"" - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/cortexproject/cortex/pkg/util/extract"" -+ grpc_util ""github.com/cortexproject/cortex/pkg/util/grpc"" - ) - - // Query multiple ingesters and returns a Matrix of samples. -@@ -145,6 +145,11 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri - if err == io.EOF { - break - } else if err != nil { -+ // Do not track a failure if the context was canceled. -+ if !grpc_util.IsGRPCContextCanceled(err) { -+ ingesterQueryFailures.WithLabelValues(ing.Addr).Inc() -+ } -+ - return nil, err - } - -@@ -177,7 +182,11 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri - hash := client.FastFingerprint(series.Labels) - existing := hashToTimeSeries[hash] - existing.Labels = series.Labels -- existing.Samples = append(existing.Samples, series.Samples...) -+ if existing.Samples == nil { -+ existing.Samples = series.Samples -+ } else { -+ existing.Samples = mergeSamples(existing.Samples, series.Samples) -+ } - hashToTimeSeries[hash] = existing - } - } -@@ -190,15 +199,48 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri - resp.Chunkseries = append(resp.Chunkseries, series) - } - for _, series := range hashToTimeSeries { -- sort.Sort(byTimestamp(series.Samples)) - resp.Timeseries = append(resp.Timeseries, series) - } - - return resp, nil - } - --type byTimestamp []client.Sample -+// Merges and dedupes two sorted slices with samples together. 
-+func mergeSamples(a, b []ingester_client.Sample) []ingester_client.Sample { -+ if sameSamples(a, b) { -+ return a -+ } - --func (b byTimestamp) Len() int { return len(b) } --func (b byTimestamp) Swap(i, j int) { b[i], b[j] = b[j], b[i] } --func (b byTimestamp) Less(i, j int) bool { return b[i].TimestampMs < b[j].TimestampMs } -+ result := make([]ingester_client.Sample, 0, len(a)+len(b)) -+ i, j := 0, 0 -+ for i < len(a) && j < len(b) { -+ if a[i].TimestampMs < b[j].TimestampMs { -+ result = append(result, a[i]) -+ i++ -+ } else if a[i].TimestampMs > b[j].TimestampMs { -+ result = append(result, b[j]) -+ j++ -+ } else { -+ result = append(result, a[i]) -+ i++ -+ j++ -+ } -+ } -+ // Add the rest of a or b. One of them is empty now. -+ result = append(result, a[i:]...) -+ result = append(result, b[j:]...) -+ return result -+} -+ -+func sameSamples(a, b []ingester_client.Sample) bool { -+ if len(a) != len(b) { -+ return false -+ } -+ -+ for i := 0; i < len(a); i++ { -+ if a[i] != b[i] { -+ return false -+ } -+ } -+ return true -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go -new file mode 100644 -index 0000000000000..433ff141fdcd1 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go -@@ -0,0 +1,49 @@ -+package client -+ -+import ( -+ context ""context"" -+) -+ -+// SendQueryStream wraps the stream's Send() checking if the context is done -+// before calling Send(). -+func SendQueryStream(s Ingester_QueryStreamServer, m *QueryStreamResponse) error { -+ return sendWithContextErrChecking(s.Context(), func() error { -+ return s.Send(m) -+ }) -+} -+ -+// SendTimeSeriesChunk wraps the stream's Send() checking if the context is done -+// before calling Send(). -+func SendTimeSeriesChunk(s Ingester_TransferChunksClient, m *TimeSeriesChunk) error { -+ return sendWithContextErrChecking(s.Context(), func() error { -+ return s.Send(m) -+ }) -+} -+ -+// SendTimeSeriesFile wraps the stream's Send() checking if the context is done -+// before calling Send(). -+func SendTimeSeriesFile(s Ingester_TransferTSDBClient, m *TimeSeriesFile) error { -+ return sendWithContextErrChecking(s.Context(), func() error { -+ return s.Send(m) -+ }) -+} -+ -+func sendWithContextErrChecking(ctx context.Context, send func() error) error { -+ // If the context has been canceled or its deadline exceeded, we should return it -+ // instead of the cryptic error the Send() will return. -+ if ctxErr := ctx.Err(); ctxErr != nil { -+ return ctxErr -+ } -+ -+ if err := send(); err != nil { -+ // Experimentally, we've seen the context switching to done after the Send() -+ // has been called, so here we do recheck the context in case of error. 
-+ if ctxErr := ctx.Err(); ctxErr != nil { -+ return ctxErr -+ } -+ -+ return err -+ } -+ -+ return nil -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go -index f1a3b4a080895..a20bb3eeeafb6 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go -@@ -10,6 +10,7 @@ import ( - - ""github.com/go-kit/kit/log"" - ""github.com/go-kit/kit/log/level"" -+ ""github.com/pkg/errors"" - ""github.com/weaveworks/common/httpgrpc"" - ""github.com/weaveworks/common/httpgrpc/server"" - ""github.com/weaveworks/common/middleware"" -@@ -18,6 +19,7 @@ import ( - - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/cortexproject/cortex/pkg/util/grpcclient"" -+ ""github.com/cortexproject/cortex/pkg/util/services"" - ) - - var ( -@@ -46,31 +48,21 @@ func (cfg *WorkerConfig) RegisterFlags(f *flag.FlagSet) { - } - - // Worker is the counter-part to the frontend, actually processing requests. --type Worker interface { -- Stop() --} -- - type worker struct { - cfg WorkerConfig - log log.Logger - server *server.Server - -- ctx context.Context -- cancel context.CancelFunc - watcher naming.Watcher //nolint:staticcheck //Skipping for now. If you still see this more than likely issue https://github.com/cortexproject/cortex/issues/2015 has not yet been addressed. - wg sync.WaitGroup - } - --type noopWorker struct { --} -- --func (noopWorker) Stop() {} -- --// NewWorker creates a new Worker. --func NewWorker(cfg WorkerConfig, server *server.Server, log log.Logger) (Worker, error) { -+// NewWorker creates a new worker and returns a service that is wrapping it. -+// If no address is specified, it returns nil service (and no error). -+func NewWorker(cfg WorkerConfig, server *server.Server, log log.Logger) (services.Service, error) { - if cfg.Address == """" { - level.Info(log).Log(""msg"", ""no address specified, not starting worker"") -- return noopWorker{}, nil -+ return nil, nil - } - - resolver, err := naming.NewDNSResolverWithFreq(cfg.DNSLookupDuration) -@@ -83,52 +75,50 @@ func NewWorker(cfg WorkerConfig, server *server.Server, log log.Logger) (Worker, - return nil, err - } - -- ctx, cancel := context.WithCancel(context.Background()) -- - w := &worker{ -- cfg: cfg, -- log: log, -- server: server, -- -- ctx: ctx, -- cancel: cancel, -+ cfg: cfg, -+ log: log, -+ server: server, - watcher: watcher, - } -- w.wg.Add(1) -- go w.watchDNSLoop() -- return w, nil -+ return services.NewBasicService(nil, w.watchDNSLoop, w.stopping), nil - } - --// Stop the worker. --func (w *worker) Stop() { -- w.watcher.Close() -- w.cancel() -+func (w *worker) stopping(_ error) error { -+ // wait until all per-address workers are done. This is only called after watchDNSLoop exits. - w.wg.Wait() -+ return nil - } - - // watchDNSLoop watches for changes in DNS and starts or stops workers. --func (w *worker) watchDNSLoop() { -- defer w.wg.Done() -+func (w *worker) watchDNSLoop(servCtx context.Context) error { -+ go func() { -+ // Close the watcher, when this service is asked to stop. -+ // Closing the watcher makes watchDNSLoop exit, since it only iterates on watcher updates, and has no other -+ // way to stop. We cannot close the watcher in `stopping` method, because it is only called *after* -+ // watchDNSLoop exits. 
-+ <-servCtx.Done() -+ w.watcher.Close() -+ }() - - cancels := map[string]context.CancelFunc{} -- defer func() { -- for _, cancel := range cancels { -- cancel() -- } -- }() - - for { - updates, err := w.watcher.Next() - if err != nil { -- level.Error(w.log).Log(""msg"", ""error from DNS watcher"", ""err"", err) -- return -+ // watcher.Next returns error when Close is called, but we call Close when our context is done. -+ // we don't want to report error in that case. -+ if servCtx.Err() != nil { -+ return nil -+ } -+ return errors.Wrapf(err, ""error from DNS watcher"") - } - - for _, update := range updates { - switch update.Op { - case naming.Add: - level.Debug(w.log).Log(""msg"", ""adding connection"", ""addr"", update.Addr) -- ctx, cancel := context.WithCancel(w.ctx) -+ ctx, cancel := context.WithCancel(servCtx) - cancels[update.Addr] = cancel - w.runMany(ctx, update.Addr) - -@@ -139,7 +129,7 @@ func (w *worker) watchDNSLoop() { - } - - default: -- panic(""unknown op"") -+ return fmt.Errorf(""unknown op: %v"", update.Op) - } - } - } -diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go -index 057617e327786..bb68cb4106065 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go -@@ -154,12 +154,12 @@ func (prometheusCodec) MergeResponse(responses ...Response) (Response, error) { - func (prometheusCodec) DecodeRequest(_ context.Context, r *http.Request) (Request, error) { - var result PrometheusRequest - var err error -- result.Start, err = ParseTime(r.FormValue(""start"")) -+ result.Start, err = util.ParseTime(r.FormValue(""start"")) - if err != nil { - return nil, err - } - -- result.End, err = ParseTime(r.FormValue(""end"")) -+ result.End, err = util.ParseTime(r.FormValue(""end"")) - if err != nil { - return nil, err - } -@@ -331,19 +331,6 @@ func matrixMerge(resps []*PrometheusResponse) []SampleStream { - return result - } - --// ParseTime parses the string into an int64, milliseconds since epoch. 
--func ParseTime(s string) (int64, error) { -- if t, err := strconv.ParseFloat(s, 64); err == nil { -- s, ns := math.Modf(t) -- tm := time.Unix(int64(s), int64(ns*float64(time.Second))) -- return util.TimeMilliseconds(tm), nil -- } -- if t, err := time.Parse(time.RFC3339Nano, s); err == nil { -- return util.TimeMilliseconds(t), nil -- } -- return 0, httpgrpc.Errorf(http.StatusBadRequest, ""cannot parse %q to a valid timestamp"", s) --} -- - func parseDurationMs(s string) (int64, error) { - if d, err := strconv.ParseFloat(s, 64); err == nil { - ts := d * float64(time.Second/time.Millisecond) -diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go -index fc0f42efc9670..bdf8920192b3a 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go -@@ -19,6 +19,8 @@ package series - import ( - ""sort"" - -+ ""github.com/cortexproject/cortex/pkg/chunk/purger"" -+ - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/pkg/labels"" - ""github.com/prometheus/prometheus/storage"" -@@ -187,3 +189,160 @@ type byLabels []storage.Series - func (b byLabels) Len() int { return len(b) } - func (b byLabels) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i].Labels(), b[j].Labels()) < 0 } -+ -+type DeletedSeriesSet struct { -+ seriesSet storage.SeriesSet -+ tombstones *purger.TombstonesSet -+ queryInterval model.Interval -+} -+ -+func NewDeletedSeriesSet(seriesSet storage.SeriesSet, tombstones *purger.TombstonesSet, queryInterval model.Interval) storage.SeriesSet { -+ return &DeletedSeriesSet{ -+ seriesSet: seriesSet, -+ tombstones: tombstones, -+ queryInterval: queryInterval, -+ } -+} -+ -+func (d DeletedSeriesSet) Next() bool { -+ return d.seriesSet.Next() -+} -+ -+func (d DeletedSeriesSet) At() storage.Series { -+ series := d.seriesSet.At() -+ deletedIntervals := d.tombstones.GetDeletedIntervals(series.Labels(), d.queryInterval.Start, d.queryInterval.End) -+ -+ // series is deleted for whole query range so return empty series -+ if len(deletedIntervals) == 1 && deletedIntervals[0] == d.queryInterval { -+ return NewEmptySeries(series.Labels()) -+ } -+ -+ return NewDeletedSeries(series, deletedIntervals) -+} -+ -+func (d DeletedSeriesSet) Err() error { -+ return d.seriesSet.Err() -+} -+ -+type DeletedSeries struct { -+ series storage.Series -+ deletedIntervals []model.Interval -+} -+ -+func NewDeletedSeries(series storage.Series, deletedIntervals []model.Interval) storage.Series { -+ return &DeletedSeries{ -+ series: series, -+ deletedIntervals: deletedIntervals, -+ } -+} -+ -+func (d DeletedSeries) Labels() labels.Labels { -+ return d.series.Labels() -+} -+ -+func (d DeletedSeries) Iterator() storage.SeriesIterator { -+ return NewDeletedSeriesIterator(d.series.Iterator(), d.deletedIntervals) -+} -+ -+type DeletedSeriesIterator struct { -+ itr storage.SeriesIterator -+ deletedIntervals []model.Interval -+} -+ -+func NewDeletedSeriesIterator(itr storage.SeriesIterator, deletedIntervals []model.Interval) storage.SeriesIterator { -+ return &DeletedSeriesIterator{ -+ itr: itr, -+ deletedIntervals: deletedIntervals, -+ } -+} -+ -+func (d DeletedSeriesIterator) Seek(t int64) bool { -+ if found := d.itr.Seek(t); !found { -+ return false -+ } -+ -+ seekedTs, _ := d.itr.At() -+ if d.isDeleted(seekedTs) { -+ // point we have seeked 
into is deleted, Next() should find a new non-deleted sample which is after t and seekedTs -+ return d.Next() -+ } -+ -+ return true -+} -+ -+func (d DeletedSeriesIterator) At() (t int64, v float64) { -+ return d.itr.At() -+} -+ -+func (d DeletedSeriesIterator) Next() bool { -+ for d.itr.Next() { -+ ts, _ := d.itr.At() -+ -+ if d.isDeleted(ts) { -+ continue -+ } -+ return true -+ } -+ return false -+} -+ -+func (d DeletedSeriesIterator) Err() error { -+ return d.itr.Err() -+} -+ -+// isDeleted removes intervals which are past ts while checking for whether ts happens to be in one of the deleted intervals -+func (d *DeletedSeriesIterator) isDeleted(ts int64) bool { -+ mts := model.Time(ts) -+ -+ for _, interval := range d.deletedIntervals { -+ if mts > interval.End { -+ d.deletedIntervals = d.deletedIntervals[1:] -+ continue -+ } else if mts < interval.Start { -+ return false -+ } -+ -+ return true -+ } -+ -+ return false -+} -+ -+type emptySeries struct { -+ labels labels.Labels -+} -+ -+func NewEmptySeries(labels labels.Labels) storage.Series { -+ return emptySeries{labels} -+} -+ -+func (e emptySeries) Labels() labels.Labels { -+ return e.labels -+} -+ -+func (emptySeries) Iterator() storage.SeriesIterator { -+ return NewEmptySeriesIterator() -+} -+ -+type emptySeriesIterator struct { -+} -+ -+func NewEmptySeriesIterator() storage.SeriesIterator { -+ return emptySeriesIterator{} -+} -+ -+func (emptySeriesIterator) Seek(t int64) bool { -+ return false -+} -+ -+func (emptySeriesIterator) At() (t int64, v float64) { -+ return 0, 0 -+} -+ -+func (emptySeriesIterator) Next() bool { -+ return false -+} -+ -+func (emptySeriesIterator) Err() error { -+ return nil -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go -index f07cd7a319a81..62d6ca6352235 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go -@@ -11,6 +11,7 @@ import ( - ""time"" - - ""github.com/go-kit/kit/log/level"" -+ perrors ""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/client_golang/prometheus/promauto"" - -@@ -339,15 +340,10 @@ func (i *Lifecycler) HealthyInstancesCount() int { - } - - func (i *Lifecycler) loop(ctx context.Context) error { -- defer func() { -- level.Info(util.Logger).Log(""msg"", ""member.loop() exited gracefully"", ""ring"", i.RingName) -- }() -- - // First, see if we exist in the cluster, update our state to match if we do, - // and add ourselves (without tokens) if we don't. - if err := i.initRing(context.Background()); err != nil { -- level.Error(util.Logger).Log(""msg"", ""failed to join the ring"", ""ring"", i.RingName, ""err"", err) -- os.Exit(1) -+ return perrors.Wrapf(err, ""failed to join the ring %s"", i.RingName) - } - - // We do various period tasks -@@ -370,16 +366,14 @@ func (i *Lifecycler) loop(ctx context.Context) error { - // let's observe the ring. By using JOINING state, this ingester will be ignored by LEAVING - // ingesters, but we also signal that it is not fully functional yet. 
- if err := i.autoJoin(context.Background(), JOINING); err != nil { -- level.Error(util.Logger).Log(""msg"", ""failed to pick tokens in the KV store"", ""ring"", i.RingName, ""err"", err) -- os.Exit(1) -+ return perrors.Wrapf(err, ""failed to pick tokens in the KV store, ring: %s"", i.RingName) - } - - level.Info(util.Logger).Log(""msg"", ""observing tokens before going ACTIVE"", ""ring"", i.RingName) - observeChan = time.After(i.cfg.ObservePeriod) - } else { - if err := i.autoJoin(context.Background(), ACTIVE); err != nil { -- level.Error(util.Logger).Log(""msg"", ""failed to pick tokens in the KV store"", ""ring"", i.RingName, ""err"", err) -- os.Exit(1) -+ return perrors.Wrapf(err, ""failed to pick tokens in the KV store, ring: %s"", i.RingName) - } - } - } -@@ -416,6 +410,7 @@ func (i *Lifecycler) loop(ctx context.Context) error { - f() - - case <-ctx.Done(): -+ level.Info(util.Logger).Log(""msg"", ""lifecycler loop() exited gracefully"", ""ring"", i.RingName) - return nil - } - } -@@ -425,7 +420,13 @@ func (i *Lifecycler) loop(ctx context.Context) error { - // - send chunks to another ingester, if it can. - // - otherwise, flush chunks to the chunk store. - // - remove config from Consul. --func (i *Lifecycler) stopping(_ error) error { -+func (i *Lifecycler) stopping(runningError error) error { -+ if runningError != nil { -+ // previously lifecycler just called os.Exit (from loop method)... -+ // now it stops more gracefully, but also without doing any cleanup -+ return nil -+ } -+ - heartbeatTicker := time.NewTicker(i.cfg.HeartbeatPeriod) - defer heartbeatTicker.Stop() - -@@ -459,8 +460,7 @@ heartbeatLoop: - - if !i.cfg.SkipUnregister { - if err := i.unregister(context.Background()); err != nil { -- level.Error(util.Logger).Log(""msg"", ""Failed to unregister from the KV store"", ""ring"", i.RingName, ""err"", err) -- os.Exit(1) -+ return perrors.Wrapf(err, ""failed to unregister from the KV store, ring: %s"", i.RingName) - } - level.Info(util.Logger).Log(""msg"", ""instance removed from the KV store"", ""ring"", i.RingName) - } -diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/errors.go b/vendor/github.com/cortexproject/cortex/pkg/util/errors.go -new file mode 100644 -index 0000000000000..c372e819466b2 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/util/errors.go -@@ -0,0 +1,6 @@ -+package util -+ -+import ""errors"" -+ -+// ErrStopProcess is the error returned by a service as a hint to stop the server entirely. -+var ErrStopProcess = errors.New(""stop process"") -diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpc/util.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpc/util.go -new file mode 100644 -index 0000000000000..19ef615e62e69 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpc/util.go -@@ -0,0 +1,17 @@ -+package grpc -+ -+import ( -+ ""github.com/gogo/status"" -+ ""google.golang.org/grpc/codes"" -+) -+ -+// IsGRPCContextCanceled returns whether the input error is a GRPC error wrapping -+// the context.Canceled error. 
-+func IsGRPCContextCanceled(err error) bool { -+ s, ok := status.FromError(err) -+ if !ok { -+ return false -+ } -+ -+ return s.Code() == codes.Canceled -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go -new file mode 100644 -index 0000000000000..5b0ccbd2cb281 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go -@@ -0,0 +1,96 @@ -+package util -+ -+import ( -+ ""context"" -+ ""fmt"" -+ -+ ""github.com/go-kit/kit/log/level"" -+ ""github.com/pkg/errors"" -+ -+ ""github.com/cortexproject/cortex/pkg/util/services"" -+) -+ -+// This service wraps module service, and adds waiting for dependencies to start before starting, -+// and dependant modules to stop before stopping this module service. -+type moduleService struct { -+ services.Service -+ -+ service services.Service -+ name string -+ -+ // startDeps, stopDeps return map of service names to services -+ startDeps, stopDeps func(string) map[string]services.Service -+} -+ -+// NewModuleService wraps a module service, and makes sure that dependencies are started/stopped before module service starts or stops. -+// If any dependency fails to start, this service fails as well. -+// On stop, errors from failed dependencies are ignored. -+func NewModuleService(name string, service services.Service, startDeps, stopDeps func(string) map[string]services.Service) services.Service { -+ w := &moduleService{ -+ name: name, -+ service: service, -+ startDeps: startDeps, -+ stopDeps: stopDeps, -+ } -+ -+ w.Service = services.NewBasicService(w.start, w.run, w.stop) -+ return w -+} -+ -+func (w *moduleService) start(serviceContext context.Context) error { -+ // wait until all startDeps are running -+ startDeps := w.startDeps(w.name) -+ for m, s := range startDeps { -+ if s == nil { -+ continue -+ } -+ -+ level.Debug(Logger).Log(""msg"", ""module waiting for initialization"", ""module"", w.name, ""waiting_for"", m) -+ -+ err := s.AwaitRunning(serviceContext) -+ if err != nil { -+ return fmt.Errorf(""failed to start %v, because it depends on module %v, which has failed: %w"", w.name, m, err) -+ } -+ } -+ -+ // we don't want to let this service to stop until all dependant services are stopped, -+ // so we use independent context here -+ level.Info(Logger).Log(""msg"", ""initialising"", ""module"", w.name) -+ err := w.service.StartAsync(context.Background()) -+ if err != nil { -+ return errors.Wrapf(err, ""error starting module: %s"", w.name) -+ } -+ -+ return w.service.AwaitRunning(serviceContext) -+} -+ -+func (w *moduleService) run(serviceContext context.Context) error { -+ // wait until service stops, or context is canceled, whatever happens first. -+ // We don't care about exact error here -+ _ = w.service.AwaitTerminated(serviceContext) -+ return w.service.FailureCase() -+} -+ -+func (w *moduleService) stop(_ error) error { -+ // wait until all stopDeps have stopped -+ stopDeps := w.stopDeps(w.name) -+ for _, s := range stopDeps { -+ if s == nil { -+ continue -+ } -+ -+ // Passed context isn't canceled, so we can only get error here, if service -+ // fails. But we don't care *how* service stops, as long as it is done. 
-+ _ = s.AwaitTerminated(context.Background()) -+ } -+ -+ level.Debug(Logger).Log(""msg"", ""stopping"", ""module"", w.name) -+ -+ err := services.StopAndAwaitTerminated(context.Background(), w.service) -+ if err != nil && err != ErrStopProcess { -+ level.Warn(Logger).Log(""msg"", ""error stopping module"", ""module"", w.name, ""err"", err) -+ } else { -+ level.Info(Logger).Log(""msg"", ""module stopped"", ""module"", w.name) -+ } -+ return err -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/services/failure_watcher.go b/vendor/github.com/cortexproject/cortex/pkg/util/services/failure_watcher.go -new file mode 100644 -index 0000000000000..9b19f7b15cb34 ---- /dev/null -+++ b/vendor/github.com/cortexproject/cortex/pkg/util/services/failure_watcher.go -@@ -0,0 +1,35 @@ -+package services -+ -+import ( -+ ""github.com/pkg/errors"" -+) -+ -+// FailureWatcher waits for service failures, and passed them to the channel. -+type FailureWatcher struct { -+ ch chan error -+} -+ -+func NewFailureWatcher() *FailureWatcher { -+ return &FailureWatcher{ch: make(chan error)} -+} -+ -+// Returns channel for this watcher. If watcher is nil, returns nil channel. -+// Errors returned on the channel include failure case and service description. -+func (w *FailureWatcher) Chan() <-chan error { -+ if w == nil { -+ return nil -+ } -+ return w.ch -+} -+ -+func (w *FailureWatcher) WatchService(service Service) { -+ service.AddListener(NewListener(nil, nil, nil, nil, func(from State, failure error) { -+ w.ch <- errors.Wrapf(failure, ""service %v failed"", service) -+ })) -+} -+ -+func (w *FailureWatcher) WatchManager(manager *Manager) { -+ manager.AddListener(NewManagerListener(nil, nil, func(service Service) { -+ w.ch <- errors.Wrapf(service.FailureCase(), ""service %v failed"", service) -+ })) -+} -diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/time.go b/vendor/github.com/cortexproject/cortex/pkg/util/time.go -index f0e8f39b74fe8..3d2b22bd69059 100644 ---- a/vendor/github.com/cortexproject/cortex/pkg/util/time.go -+++ b/vendor/github.com/cortexproject/cortex/pkg/util/time.go -@@ -1,7 +1,27 @@ - package util - --import ""time"" -+import ( -+ ""math"" -+ ""net/http"" -+ ""strconv"" -+ ""time"" -+ -+ ""github.com/weaveworks/common/httpgrpc"" -+) - - func TimeMilliseconds(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) - } -+ -+// ParseTime parses the string into an int64, milliseconds since epoch. 
-+func ParseTime(s string) (int64, error) { -+ if t, err := strconv.ParseFloat(s, 64); err == nil { -+ s, ns := math.Modf(t) -+ tm := time.Unix(int64(s), int64(ns*float64(time.Second))) -+ return TimeMilliseconds(tm), nil -+ } -+ if t, err := time.Parse(time.RFC3339Nano, s); err == nil { -+ return TimeMilliseconds(t), nil -+ } -+ return 0, httpgrpc.Errorf(http.StatusBadRequest, ""cannot parse %q to a valid timestamp"", s) -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 434cb07fce34c..4c2fc02c67d11 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -130,7 +130,7 @@ github.com/coreos/go-systemd/sdjournal - # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f - github.com/coreos/pkg/capnslog - github.com/coreos/pkg/dlopen --# github.com/cortexproject/cortex v0.7.0-rc.0 -+# github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c - github.com/cortexproject/cortex/pkg/chunk - github.com/cortexproject/cortex/pkg/chunk/aws - github.com/cortexproject/cortex/pkg/chunk/azure -@@ -140,6 +140,7 @@ github.com/cortexproject/cortex/pkg/chunk/encoding - github.com/cortexproject/cortex/pkg/chunk/gcp - github.com/cortexproject/cortex/pkg/chunk/local - github.com/cortexproject/cortex/pkg/chunk/objectclient -+github.com/cortexproject/cortex/pkg/chunk/purger - github.com/cortexproject/cortex/pkg/chunk/storage - github.com/cortexproject/cortex/pkg/chunk/testutils - github.com/cortexproject/cortex/pkg/chunk/util -@@ -162,6 +163,7 @@ github.com/cortexproject/cortex/pkg/ring/kv/memberlist - github.com/cortexproject/cortex/pkg/util - github.com/cortexproject/cortex/pkg/util/extract - github.com/cortexproject/cortex/pkg/util/flagext -+github.com/cortexproject/cortex/pkg/util/grpc - github.com/cortexproject/cortex/pkg/util/grpcclient - github.com/cortexproject/cortex/pkg/util/limiter - github.com/cortexproject/cortex/pkg/util/middleware",loki,"update Cortex to master (#1799) - -* Update Cortex to master - -Signed-off-by: Peter Štibraný - -* Integrate latest Cortex master changes into loki - -- Frontend worker now needs to be started explicitly -- Lifecycler no longer exits on error. Distributor and Ingester do that now - -Signed-off-by: Peter Štibraný - -* Lint - -Signed-off-by: Peter Štibraný - -* go mod tidy - -Signed-off-by: Peter Štibraný - -* Update Cortex to master. After 0.7 release, it is now showing as v0.7.1-... - -Signed-off-by: Peter Štibraný " -86fbffff0c222e770ceca63bce0b58dccd14b984,2024-07-01 20:04:03,Jay Clifford,"docs: Updated bucket names and added warning (#13347) - -Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/setup/install/helm/install-microservices/_index.md b/docs/sources/setup/install/helm/install-microservices/_index.md -index 9e0eb4d3307e6..4afca42d10b3e 100644 ---- a/docs/sources/setup/install/helm/install-microservices/_index.md -+++ b/docs/sources/setup/install/helm/install-microservices/_index.md -@@ -167,6 +167,10 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu - - After testing Loki with MinIO, it is recommended to configure Loki with an object storage provider. The following examples shows how to configure Loki with different object storage providers: - -+{{< admonition type=""caution"" >}} -+When deploying Loki using S3 Storage **DO NOT** use the default bucket names; `chunk`, `ruler` and `admin`. Choose a unique name for each bucket. 
For more information see the following [security update](https://grafana.com/blog/2024/06/27/grafana-security-update-grafana-loki-and-unintended-data-write-attempts-to-amazon-s3-buckets/). This caution does not apply when you are using MinIO. When using MinIO we recommend using the default bucket names. -+{{< /admonition >}} -+ - {{< code >}} - - ```s3 -@@ -192,9 +196,9 @@ After testing Loki with MinIO, it is recommended to configure Loki with an objec - storage: - type: s3 - bucketNames: -- chunks: ""chunks"" -- ruler: ""ruler"" -- admin: ""admin"" -+ chunks: """" -+ ruler: """" -+ admin: """" - s3: - # s3 URL can be used to specify the endpoint, access key, secret key, and bucket name - s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name -@@ -343,4 +347,4 @@ To configure other storage providers, refer to the [Helm Chart Reference]({{< re - - ## Next Steps - * Configure an agent to [send log data to Loki](/docs/loki//send-data/). --* Monitor the Loki deployment using the [Meta Monitoring Healm chart](/docs/loki//setup/install/helm/monitor-and-alert/) -+* Monitor the Loki deployment using the [Meta Monitoring Helm chart](/docs/loki//setup/install/helm/monitor-and-alert/) -diff --git a/docs/sources/setup/install/helm/install-scalable/_index.md b/docs/sources/setup/install/helm/install-scalable/_index.md -index fed56e339d969..a39b6580a90b2 100644 ---- a/docs/sources/setup/install/helm/install-scalable/_index.md -+++ b/docs/sources/setup/install/helm/install-scalable/_index.md -@@ -128,6 +128,10 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu - - After testing Loki with MinIO, it is recommended to configure Loki with an object storage provider. The following examples shows how to configure Loki with different object storage providers: - -+{{< admonition type=""caution"" >}} -+When deploying Loki using S3 Storage **DO NOT** use the default bucket names; `chunk`, `ruler` and `admin`. Choose a unique name for each bucket. For more information see the following [security update](https://grafana.com/blog/2024/06/27/grafana-security-update-grafana-loki-and-unintended-data-write-attempts-to-amazon-s3-buckets/). This caution does not apply when you are using MinIO. When using MinIO we recommend using the default bucket names. -+{{< /admonition >}} -+ - {{< code >}} - - ```s3 -@@ -151,9 +155,9 @@ loki: - storage: - type: s3 - bucketNames: -- chunks: ""chunks"" -- ruler: ""ruler"" -- admin: ""admin"" -+ chunks: """" -+ ruler: """" -+ admin: """" - s3: - # s3 URL can be used to specify the endpoint, access key, secret key, and bucket name - s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name -@@ -295,4 +299,4 @@ To configure other storage providers, refer to the [Helm Chart Reference]({{< re - - ## Next Steps - * Configure an agent to [send log data to Loki](/docs/loki//send-data/). --* Monitor the Loki deployment using the [Meta Monitoring Healm chart](/docs/loki//setup/install/helm/monitor-and-alert/) -\ No newline at end of file -+* Monitor the Loki deployment using the [Meta Monitoring Helm chart](/docs/loki//setup/install/helm/monitor-and-alert/)",docs,"Updated bucket names and added warning (#13347) - -Co-authored-by: J Stickler " -97912b6f4c36965a855bca4757d67b382b8d07d1,2021-07-07 14:14:30,Cyril Tovena,"Shard ingester queries. (#3852) - -* Shard ingester queries. - -This is still experimental but already yield from 2x to 6x faster for short period queries. - -I'm still playing with it but I want to share how I do it early. 
- -Signed-off-by: Cyril Tovena - -* Add notice of the code origin. - -Signed-off-by: Cyril Tovena - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index_test.go - -Co-authored-by: Owen Diehl - -* Align shards from ingester and storage. - -Utimately we should have a storage that relies on fingerprint, but that's harder to change. - -Signed-off-by: Cyril Tovena - -* Remove comment - -Signed-off-by: Cyril Tovena - -* Fixes delete index func - -Signed-off-by: Cyril Tovena - -* Test reverting. - -Signed-off-by: Cyril Tovena - -* Fixes a bug causing non deterministic hash. - -Signed-off-by: Cyril Tovena - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index_test.go - -Co-authored-by: Owen Diehl - -* Fixes build. - -Signed-off-by: Cyril Tovena - -* got linted :( - -Signed-off-by: Cyril Tovena - -Co-authored-by: Owen Diehl ",False,"diff --git a/pkg/ingester/index/index.go b/pkg/ingester/index/index.go -new file mode 100644 -index 0000000000000..f0b7ae5388119 ---- /dev/null -+++ b/pkg/ingester/index/index.go -@@ -0,0 +1,446 @@ -+// originally from https://github.com/cortexproject/cortex/blob/868898a2921c662dcd4f90683e8b95c927a8edd8/pkg/ingester/index/index.go -+// but modified to support sharding queries. -+package index -+ -+import ( -+ ""bytes"" -+ ""crypto/sha256"" -+ ""encoding/base64"" -+ ""encoding/binary"" -+ ""errors"" -+ ""fmt"" -+ ""sort"" -+ ""strconv"" -+ ""sync"" -+ ""unsafe"" -+ -+ ""github.com/prometheus/common/model"" -+ ""github.com/prometheus/prometheus/pkg/labels"" -+ -+ ""github.com/cortexproject/cortex/pkg/chunk"" -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/cortexproject/cortex/pkg/querier/astmapper"" -+) -+ -+const indexShards = 32 -+ -+var ErrInvalidShardQuery = errors.New(""incompatible index shard query"") -+ -+// InvertedIndex implements a in-memory inverted index from label pairs to fingerprints. -+// It is sharded to reduce lock contention on writes. -+type InvertedIndex struct { -+ totalShards uint32 -+ shards []*indexShard -+} -+ -+// New returns a new InvertedIndex. -+func New() *InvertedIndex { -+ return NewWithShards(indexShards) -+} -+ -+func NewWithShards(totalShards uint32) *InvertedIndex { -+ shards := make([]*indexShard, totalShards) -+ for i := uint32(0); i < totalShards; i++ { -+ shards[i] = &indexShard{ -+ idx: map[string]indexEntry{}, -+ shard: i, -+ } -+ } -+ return &InvertedIndex{ -+ totalShards: totalShards, -+ shards: shards, -+ } -+} -+ -+func (ii *InvertedIndex) getShards(shard *astmapper.ShardAnnotation) []*indexShard { -+ if shard == nil { -+ return ii.shards -+ } -+ -+ indexFactor := int(ii.totalShards) -+ // calculate the start of the hash ring desired -+ lowerBound := shard.Shard * indexFactor / shard.Of -+ // calculate the end of the hash ring desired -+ upperBound := (shard.Shard + 1) * indexFactor / shard.Of -+ // see if the upper bound is cleanly doesn't align cleanly with the next shard -+ // which can happen when the schema sharding factor and inverted index -+ // sharding factor are not multiples of each other. 
-+ rem := (shard.Shard + 1) * indexFactor % shard.Of -+ if rem > 0 { -+ // there's overlap on the upper shard -+ upperBound = upperBound + 1 -+ } -+ -+ return ii.shards[lowerBound:upperBound] -+} -+ -+func validateShard(totalShards uint32, shard *astmapper.ShardAnnotation) error { -+ if shard == nil { -+ return nil -+ } -+ if int(totalShards)%shard.Of != 0 || uint32(shard.Of) > totalShards { -+ return fmt.Errorf(""%w index_shard:%d query_shard:%d_%d"", ErrInvalidShardQuery, totalShards, shard.Of, shard.Shard) -+ } -+ return nil -+} -+ -+// Add a fingerprint under the specified labels. -+// NOTE: memory for `labels` is unsafe; anything retained beyond the -+// life of this function must be copied -+func (ii *InvertedIndex) Add(labels []cortexpb.LabelAdapter, fp model.Fingerprint) labels.Labels { -+ shardIndex := labelsSeriesIDHash(cortexpb.FromLabelAdaptersToLabels(labels)) -+ shard := ii.shards[shardIndex%ii.totalShards] -+ return shard.add(labels, fp) // add() returns 'interned' values so the original labels are not retained -+} -+ -+var ( -+ bufferPool = sync.Pool{ -+ New: func() interface{} { -+ return bytes.NewBuffer(make([]byte, 0, 1000)) -+ }, -+ } -+ base64Pool = sync.Pool{ -+ New: func() interface{} { -+ return bytes.NewBuffer(make([]byte, 0, base64.RawStdEncoding.EncodedLen(sha256.Size))) -+ }, -+ } -+) -+ -+func labelsSeriesIDHash(ls labels.Labels) uint32 { -+ b64 := base64Pool.Get().(*bytes.Buffer) -+ defer func() { -+ base64Pool.Put(b64) -+ }() -+ buf := b64.Bytes()[:b64.Cap()] -+ labelsSeriesID(ls, buf) -+ return binary.BigEndian.Uint32(buf) -+} -+ -+func labelsSeriesID(ls labels.Labels, dest []byte) { -+ buf := bufferPool.Get().(*bytes.Buffer) -+ defer func() { -+ buf.Reset() -+ bufferPool.Put(buf) -+ }() -+ labelsString(buf, ls) -+ h := sha256.Sum256(buf.Bytes()) -+ dest = dest[:base64.RawStdEncoding.EncodedLen(len(h))] -+ base64.RawStdEncoding.Encode(dest, h[:]) -+} -+ -+// Backwards-compatible with model.Metric.String() -+func labelsString(b *bytes.Buffer, ls labels.Labels) { -+ b.WriteByte('{') -+ i := 0 -+ for _, l := range ls { -+ if l.Name == labels.MetricName { -+ continue -+ } -+ if i > 0 { -+ b.WriteByte(',') -+ b.WriteByte(' ') -+ } -+ b.WriteString(l.Name) -+ b.WriteByte('=') -+ var buf [1000]byte -+ b.Write(strconv.AppendQuote(buf[:0], l.Value)) -+ i++ -+ } -+ b.WriteByte('}') -+} -+ -+// Lookup all fingerprints for the provided matchers. -+func (ii *InvertedIndex) Lookup(matchers []*labels.Matcher, shard *astmapper.ShardAnnotation) ([]model.Fingerprint, error) { -+ if len(matchers) == 0 { -+ return nil, nil -+ } -+ -+ if err := validateShard(ii.totalShards, shard); err != nil { -+ return nil, err -+ } -+ -+ result := []model.Fingerprint{} -+ shards := ii.getShards(shard) -+ for i := range shards { -+ fps := shards[i].lookup(matchers) -+ result = append(result, fps...) -+ } -+ -+ return result, nil -+} -+ -+// LabelNames returns all label names. -+func (ii *InvertedIndex) LabelNames(shard *astmapper.ShardAnnotation) ([]string, error) { -+ if err := validateShard(ii.totalShards, shard); err != nil { -+ return nil, err -+ } -+ shards := ii.getShards(shard) -+ results := make([][]string, 0, len(shards)) -+ for i := range shards { -+ shardResult := shards[i].labelNames() -+ results = append(results, shardResult) -+ } -+ -+ return mergeStringSlices(results), nil -+} -+ -+// LabelValues returns the values for the given label. 
-+func (ii *InvertedIndex) LabelValues(name string, shard *astmapper.ShardAnnotation) ([]string, error) { -+ if err := validateShard(ii.totalShards, shard); err != nil { -+ return nil, err -+ } -+ shards := ii.getShards(shard) -+ results := make([][]string, 0, len(shards)) -+ -+ for i := range shards { -+ shardResult := shards[i].labelValues(name) -+ results = append(results, shardResult) -+ } -+ -+ return mergeStringSlices(results), nil -+} -+ -+// Delete a fingerprint with the given label pairs. -+func (ii *InvertedIndex) Delete(labels labels.Labels, fp model.Fingerprint) { -+ shard := ii.shards[labelsSeriesIDHash(labels)%ii.totalShards] -+ shard.delete(labels, fp) -+} -+ -+// NB slice entries are sorted in fp order. -+type indexEntry struct { -+ name string -+ fps map[string]indexValueEntry -+} -+ -+type indexValueEntry struct { -+ value string -+ fps []model.Fingerprint -+} -+ -+type unlockIndex map[string]indexEntry -+ -+// This is the prevalent value for Intel and AMD CPUs as-at 2018. -+const cacheLineSize = 64 -+ -+type indexShard struct { -+ shard uint32 -+ mtx sync.RWMutex -+ idx unlockIndex -+ //nolint:structcheck,unused -+ pad [cacheLineSize - unsafe.Sizeof(sync.Mutex{}) - unsafe.Sizeof(unlockIndex{})]byte -+} -+ -+func copyString(s string) string { -+ return string([]byte(s)) -+} -+ -+// add metric to the index; return all the name/value pairs as a fresh -+// sorted slice, referencing 'interned' strings from the index so that -+// no references are retained to the memory of `metric`. -+func (shard *indexShard) add(metric []cortexpb.LabelAdapter, fp model.Fingerprint) labels.Labels { -+ shard.mtx.Lock() -+ defer shard.mtx.Unlock() -+ -+ internedLabels := make(labels.Labels, len(metric)) -+ -+ for i, pair := range metric { -+ values, ok := shard.idx[pair.Name] -+ if !ok { -+ values = indexEntry{ -+ name: copyString(pair.Name), -+ fps: map[string]indexValueEntry{}, -+ } -+ shard.idx[values.name] = values -+ } -+ fingerprints, ok := values.fps[pair.Value] -+ if !ok { -+ fingerprints = indexValueEntry{ -+ value: copyString(pair.Value), -+ } -+ } -+ // Insert into the right position to keep fingerprints sorted -+ j := sort.Search(len(fingerprints.fps), func(i int) bool { -+ return fingerprints.fps[i] >= fp -+ }) -+ fingerprints.fps = append(fingerprints.fps, 0) -+ copy(fingerprints.fps[j+1:], fingerprints.fps[j:]) -+ fingerprints.fps[j] = fp -+ values.fps[fingerprints.value] = fingerprints -+ internedLabels[i] = labels.Label{Name: values.name, Value: fingerprints.value} -+ } -+ sort.Sort(internedLabels) -+ return internedLabels -+} -+ -+func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint { -+ // index slice values must only be accessed under lock, so all -+ // code paths must take a copy before returning -+ shard.mtx.RLock() -+ defer shard.mtx.RUnlock() -+ -+ // per-shard intersection is initially nil, which is a special case -+ // meaning ""everything"" when passed to intersect() -+ // loop invariant: result is sorted -+ var result []model.Fingerprint -+ for _, matcher := range matchers { -+ values, ok := shard.idx[matcher.Name] -+ if !ok { -+ return nil -+ } -+ var toIntersect model.Fingerprints -+ if matcher.Type == labels.MatchEqual { -+ fps := values.fps[matcher.Value] -+ toIntersect = append(toIntersect, fps.fps...) 
// deliberate copy -+ } else if matcher.Type == labels.MatchRegexp && len(chunk.FindSetMatches(matcher.Value)) > 0 { -+ // The lookup is of the form `=~""a|b|c|d""` -+ set := chunk.FindSetMatches(matcher.Value) -+ for _, value := range set { -+ toIntersect = append(toIntersect, values.fps[value].fps...) -+ } -+ sort.Sort(toIntersect) -+ } else { -+ // accumulate the matching fingerprints (which are all distinct) -+ // then sort to maintain the invariant -+ for value, fps := range values.fps { -+ if matcher.Matches(value) { -+ toIntersect = append(toIntersect, fps.fps...) -+ } -+ } -+ sort.Sort(toIntersect) -+ } -+ result = intersect(result, toIntersect) -+ if len(result) == 0 { -+ return nil -+ } -+ } -+ -+ return result -+} -+ -+func (shard *indexShard) labelNames() []string { -+ shard.mtx.RLock() -+ defer shard.mtx.RUnlock() -+ -+ results := make([]string, 0, len(shard.idx)) -+ for name := range shard.idx { -+ results = append(results, name) -+ } -+ -+ sort.Strings(results) -+ return results -+} -+ -+func (shard *indexShard) labelValues(name string) []string { -+ shard.mtx.RLock() -+ defer shard.mtx.RUnlock() -+ -+ values, ok := shard.idx[name] -+ if !ok { -+ return nil -+ } -+ -+ results := make([]string, 0, len(values.fps)) -+ for val := range values.fps { -+ results = append(results, val) -+ } -+ -+ sort.Strings(results) -+ return results -+} -+ -+func (shard *indexShard) delete(labels labels.Labels, fp model.Fingerprint) { -+ shard.mtx.Lock() -+ defer shard.mtx.Unlock() -+ -+ for _, pair := range labels { -+ name, value := pair.Name, pair.Value -+ values, ok := shard.idx[name] -+ if !ok { -+ continue -+ } -+ fingerprints, ok := values.fps[value] -+ if !ok { -+ continue -+ } -+ -+ j := sort.Search(len(fingerprints.fps), func(i int) bool { -+ return fingerprints.fps[i] >= fp -+ }) -+ -+ // see if search didn't find fp which matches the condition which means we don't have to do anything. -+ if j >= len(fingerprints.fps) || fingerprints.fps[j] != fp { -+ continue -+ } -+ fingerprints.fps = fingerprints.fps[:j+copy(fingerprints.fps[j:], fingerprints.fps[j+1:])] -+ -+ if len(fingerprints.fps) == 0 { -+ delete(values.fps, value) -+ } else { -+ values.fps[value] = fingerprints -+ } -+ -+ if len(values.fps) == 0 { -+ delete(shard.idx, name) -+ } else { -+ shard.idx[name] = values -+ } -+ } -+} -+ -+// intersect two sorted lists of fingerprints. Assumes there are no duplicate -+// fingerprints within the input lists. -+func intersect(a, b []model.Fingerprint) []model.Fingerprint { -+ if a == nil { -+ return b -+ } -+ result := []model.Fingerprint{} -+ for i, j := 0, 0; i < len(a) && j < len(b); { -+ if a[i] == b[j] { -+ result = append(result, a[i]) -+ } -+ if a[i] < b[j] { -+ i++ -+ } else { -+ j++ -+ } -+ } -+ return result -+} -+ -+func mergeStringSlices(ss [][]string) []string { -+ switch len(ss) { -+ case 0: -+ return nil -+ case 1: -+ return ss[0] -+ case 2: -+ return mergeTwoStringSlices(ss[0], ss[1]) -+ default: -+ halfway := len(ss) / 2 -+ return mergeTwoStringSlices( -+ mergeStringSlices(ss[:halfway]), -+ mergeStringSlices(ss[halfway:]), -+ ) -+ } -+} -+ -+func mergeTwoStringSlices(a, b []string) []string { -+ result := make([]string, 0, len(a)+len(b)) -+ i, j := 0, 0 -+ for i < len(a) && j < len(b) { -+ if a[i] < b[j] { -+ result = append(result, a[i]) -+ i++ -+ } else if a[i] > b[j] { -+ result = append(result, b[j]) -+ j++ -+ } else { -+ result = append(result, a[i]) -+ i++ -+ j++ -+ } -+ } -+ result = append(result, a[i:]...) -+ result = append(result, b[j:]...) 
-+ return result -+} -diff --git a/pkg/ingester/index/index_test.go b/pkg/ingester/index/index_test.go -new file mode 100644 -index 0000000000000..5bb5908b29827 ---- /dev/null -+++ b/pkg/ingester/index/index_test.go -@@ -0,0 +1,108 @@ -+package index -+ -+import ( -+ ""fmt"" -+ ""sort"" -+ ""testing"" -+ -+ ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/cortexproject/cortex/pkg/querier/astmapper"" -+ ""github.com/cortexproject/cortex/pkg/util"" -+ ""github.com/prometheus/common/model"" -+ ""github.com/prometheus/prometheus/pkg/labels"" -+ ""github.com/stretchr/testify/require"" -+) -+ -+func Test_GetShards(t *testing.T) { -+ for _, tt := range []struct { -+ total uint32 -+ shard *astmapper.ShardAnnotation -+ expected []uint32 -+ }{ -+ // equal factors -+ {16, &astmapper.ShardAnnotation{Shard: 0, Of: 16}, []uint32{0}}, -+ {16, &astmapper.ShardAnnotation{Shard: 4, Of: 16}, []uint32{4}}, -+ {16, &astmapper.ShardAnnotation{Shard: 15, Of: 16}, []uint32{15}}, -+ -+ // idx factor a larger multiple of schema factor -+ {32, &astmapper.ShardAnnotation{Shard: 0, Of: 16}, []uint32{0, 1}}, -+ {32, &astmapper.ShardAnnotation{Shard: 4, Of: 16}, []uint32{8, 9}}, -+ {32, &astmapper.ShardAnnotation{Shard: 15, Of: 16}, []uint32{30, 31}}, -+ {64, &astmapper.ShardAnnotation{Shard: 15, Of: 16}, []uint32{60, 61, 62, 63}}, -+ -+ // schema factor is a larger multiple of idx factor -+ {16, &astmapper.ShardAnnotation{Shard: 0, Of: 32}, []uint32{0}}, -+ {16, &astmapper.ShardAnnotation{Shard: 4, Of: 32}, []uint32{2}}, -+ {16, &astmapper.ShardAnnotation{Shard: 15, Of: 32}, []uint32{7}}, -+ -+ // idx factor smaller but not a multiple of schema factor -+ {4, &astmapper.ShardAnnotation{Shard: 0, Of: 5}, []uint32{0}}, -+ {4, &astmapper.ShardAnnotation{Shard: 1, Of: 5}, []uint32{0, 1}}, -+ {4, &astmapper.ShardAnnotation{Shard: 4, Of: 5}, []uint32{3}}, -+ -+ // schema factor smaller but not a multiple of idx factor -+ {8, &astmapper.ShardAnnotation{Shard: 0, Of: 5}, []uint32{0, 1}}, -+ {8, &astmapper.ShardAnnotation{Shard: 2, Of: 5}, []uint32{3, 4}}, -+ {8, &astmapper.ShardAnnotation{Shard: 3, Of: 5}, []uint32{4, 5, 6}}, -+ {8, &astmapper.ShardAnnotation{Shard: 4, Of: 5}, []uint32{6, 7}}, -+ } { -+ tt := tt -+ t.Run(tt.shard.String()+fmt.Sprintf(""_total_%d"", tt.total), func(t *testing.T) { -+ ii := NewWithShards(tt.total) -+ res := ii.getShards(tt.shard) -+ resInt := []uint32{} -+ for _, r := range res { -+ resInt = append(resInt, r.shard) -+ } -+ require.Equal(t, tt.expected, resInt) -+ }) -+ } -+} -+ -+func Test_ValidateShards(t *testing.T) { -+ require.NoError(t, validateShard(32, &astmapper.ShardAnnotation{Shard: 1, Of: 16})) -+} -+ -+var ( -+ result uint32 -+ lbs = []cortexpb.LabelAdapter{ -+ {Name: ""foo"", Value: ""bar""}, -+ } -+ buf = make([]byte, 0, 1024) -+) -+ -+func BenchmarkHash(b *testing.B) { -+ b.Run(""sha256"", func(b *testing.B) { -+ for n := 0; n < b.N; n++ { -+ result = labelsSeriesIDHash(cortexpb.FromLabelAdaptersToLabels(lbs)) % 16 -+ } -+ }) -+ b.Run(""xxash"", func(b *testing.B) { -+ for n := 0; n < b.N; n++ { -+ var fp uint64 -+ fp, buf = cortexpb.FromLabelAdaptersToLabels(lbs).HashWithoutLabels(buf, []string(nil)...) 
-+ result = util.HashFP(model.Fingerprint(fp)) % 16 -+ } -+ }) -+} -+ -+func TestDeleteAddLoopkup(t *testing.T) { -+ index := New() -+ lbs := []cortexpb.LabelAdapter{ -+ {Name: ""foo"", Value: ""foo""}, -+ {Name: ""bar"", Value: ""bar""}, -+ {Name: ""buzz"", Value: ""buzz""}, -+ } -+ sort.Sort(cortexpb.FromLabelAdaptersToLabels(lbs)) -+ -+ require.Equal(t, uint32(7), labelsSeriesIDHash(cortexpb.FromLabelAdaptersToLabels(lbs))%32) -+ // make sure we consistent -+ require.Equal(t, uint32(7), labelsSeriesIDHash(cortexpb.FromLabelAdaptersToLabels(lbs))%32) -+ index.Add(lbs, model.Fingerprint((cortexpb.FromLabelAdaptersToLabels(lbs).Hash()))) -+ index.Delete(cortexpb.FromLabelAdaptersToLabels(lbs), model.Fingerprint(cortexpb.FromLabelAdaptersToLabels(lbs).Hash())) -+ ids, err := index.Lookup([]*labels.Matcher{ -+ labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""foo""), -+ }, nil) -+ require.NoError(t, err) -+ require.Len(t, ids, 0) -+} -diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -index 1d3f3851f31f2..e98020d88d7f0 100644 ---- a/pkg/ingester/instance.go -+++ b/pkg/ingester/instance.go -@@ -8,6 +8,7 @@ import ( - ""syscall"" - - ""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/cortexproject/cortex/pkg/querier/astmapper"" - ""github.com/go-kit/kit/log/level"" - ""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" -@@ -18,10 +19,10 @@ import ( - ""github.com/weaveworks/common/httpgrpc"" - ""go.uber.org/atomic"" - -- ""github.com/cortexproject/cortex/pkg/ingester/index"" - cutil ""github.com/cortexproject/cortex/pkg/util"" - util_log ""github.com/cortexproject/cortex/pkg/util/log"" - -+ ""github.com/grafana/loki/pkg/ingester/index"" - ""github.com/grafana/loki/pkg/iter"" - ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/logql"" -@@ -300,9 +301,22 @@ func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) ([]iter - ingStats := stats.GetIngesterData(ctx) - var iters []iter.EntryIterator - -+ var shard *astmapper.ShardAnnotation -+ shards, err := logql.ParseShards(req.Shards) -+ if err != nil { -+ return nil, err -+ } -+ if len(shards) > 1 { -+ return nil, errors.New(""only one shard per ingester query is supported"") -+ } -+ if len(shards) == 1 { -+ shard = &shards[0] -+ } -+ - err = i.forMatchingStreams( - ctx, - expr.Matchers(), -+ shard, - func(stream *stream) error { - iter, err := stream.Iterator(ctx, ingStats, req.Start, req.End, req.Direction, pipeline.ForStream(stream.labels)) - if err != nil { -@@ -331,9 +345,23 @@ func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams - - ingStats := stats.GetIngesterData(ctx) - var iters []iter.SampleIterator -+ -+ var shard *astmapper.ShardAnnotation -+ shards, err := logql.ParseShards(req.Shards) -+ if err != nil { -+ return nil, err -+ } -+ if len(shards) > 1 { -+ return nil, errors.New(""only one shard per ingester query is supported"") -+ } -+ if len(shards) == 1 { -+ shard = &shards[0] -+ } -+ - err = i.forMatchingStreams( - ctx, - expr.Selector().Matchers(), -+ shard, - func(stream *stream) error { - iter, err := stream.SampleIterator(ctx, ingStats, req.Start, req.End, extractor.ForStream(stream.labels)) - if err != nil { -@@ -353,13 +381,19 @@ func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams - func (i *instance) Label(_ context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { - var labels []string - if req.Values { -- values := i.index.LabelValues(req.Name) -+ 
values, err := i.index.LabelValues(req.Name, nil) -+ if err != nil { -+ return nil, err -+ } - labels = make([]string, len(values)) - for i := 0; i < len(values); i++ { - labels[i] = values[i] - } - } else { -- names := i.index.LabelNames() -+ names, err := i.index.LabelNames(nil) -+ if err != nil { -+ return nil, err -+ } - labels = make([]string, len(names)) - for i := 0; i < len(names); i++ { - labels[i] = names[i] -@@ -396,7 +430,7 @@ func (i *instance) Series(ctx context.Context, req *logproto.SeriesRequest) (*lo - } else { - dedupedSeries := make(map[uint64]logproto.SeriesIdentifier) - for _, matchers := range groups { -- err = i.forMatchingStreams(ctx, matchers, func(stream *stream) error { -+ err = i.forMatchingStreams(ctx, matchers, nil, func(stream *stream) error { - // consider the stream only if it overlaps the request time range - if shouldConsiderStream(stream, req) { - // exit early when this stream was added by an earlier group -@@ -458,13 +492,17 @@ func (i *instance) forAllStreams(ctx context.Context, fn func(*stream) error) er - func (i *instance) forMatchingStreams( - ctx context.Context, - matchers []*labels.Matcher, -+ shards *astmapper.ShardAnnotation, - fn func(*stream) error, - ) error { - i.streamsMtx.RLock() - defer i.streamsMtx.RUnlock() - - filters, matchers := cutil.SplitFiltersAndMatchers(matchers) -- ids := i.index.Lookup(matchers) -+ ids, err := i.index.Lookup(matchers, shards) -+ if err != nil { -+ return err -+ } - var chunkFilter storage.ChunkFilterer - if i.chunkFilter != nil { - chunkFilter = i.chunkFilter.ForRequest(ctx) -@@ -492,7 +530,7 @@ outer: - } - - func (i *instance) addNewTailer(ctx context.Context, t *tailer) error { -- if err := i.forMatchingStreams(ctx, t.matchers, func(s *stream) error { -+ if err := i.forMatchingStreams(ctx, t.matchers, nil, func(s *stream) error { - s.addTailer(t) - return nil - }); err != nil { -diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go -index c396054532ee0..47837f745cbf8 100644 ---- a/pkg/querier/queryrange/querysharding.go -+++ b/pkg/querier/queryrange/querysharding.go -@@ -44,17 +44,11 @@ func NewQueryShardMiddleware( - }) - - return queryrange.MiddlewareFunc(func(next queryrange.Handler) queryrange.Handler { -- return &shardSplitter{ -- MinShardingLookback: minShardingLookback, -- shardingware: queryrange.MergeMiddlewares( -- queryrange.InstrumentMiddleware(""shardingware"", middlewareMetrics), -- mapperware, -- ).Wrap(next), -- now: time.Now, -- next: queryrange.InstrumentMiddleware(""sharding-bypass"", middlewareMetrics).Wrap(next), -- } -+ return queryrange.MergeMiddlewares( -+ queryrange.InstrumentMiddleware(""shardingware"", middlewareMetrics), -+ mapperware, -+ ).Wrap(next) - }) -- - } - - func newASTMapperware(",unknown,"Shard ingester queries. (#3852) - -* Shard ingester queries. - -This is still experimental but already yield from 2x to 6x faster for short period queries. - -I'm still playing with it but I want to share how I do it early. - -Signed-off-by: Cyril Tovena - -* Add notice of the code origin. - -Signed-off-by: Cyril Tovena - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index_test.go - -Co-authored-by: Owen Diehl - -* Align shards from ingester and storage. - -Utimately we should have a storage that relies on fingerprint, but that's harder to change. 
- -Signed-off-by: Cyril Tovena - -* Remove comment - -Signed-off-by: Cyril Tovena - -* Fixes delete index func - -Signed-off-by: Cyril Tovena - -* Test reverting. - -Signed-off-by: Cyril Tovena - -* Fixes a bug causing non deterministic hash. - -Signed-off-by: Cyril Tovena - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index.go - -Co-authored-by: Owen Diehl - -* Update pkg/ingester/index/index_test.go - -Co-authored-by: Owen Diehl - -* Fixes build. - -Signed-off-by: Cyril Tovena - -* got linted :( - -Signed-off-by: Cyril Tovena - -Co-authored-by: Owen Diehl " -2fcde18207c58552298a87d61f748edb89b86ede,2023-08-03 18:17:35,Mohamed-Amine Bouqsimi,"Fix query_timeout warning messages (#10083) - -**What this PR does / why we need it**: -This PR fixes the warning messages that are logged in relation to the -timeouts migration. - -The messages reference the per-tenant config, but the config in question -in the `AdjustForTimeoutsMigration` function is the global -`limits_config`. This is a source of confusion for users. - - -**Which issue(s) this PR fixes**: -Fixes #9801",False,"diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md -index fc78f4de82dba..fc8d32643d7f5 100644 ---- a/docs/sources/configure/_index.md -+++ b/docs/sources/configure/_index.md -@@ -2446,8 +2446,8 @@ The `limits_config` block configures global and per-tenant limits in Loki. - [query_ready_index_num_days: | default = 0] - - # Timeout when querying backends (ingesters or storage) during the execution of --# a query request. If a specific per-tenant timeout is used, this timeout is --# ignored. -+# a query request. When a specific per-tenant timeout is used, the global -+# timeout is ignored. - # CLI flag: -querier.query-timeout - [query_timeout: | default = 1m] - -diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go -index f115892906b20..9c80db00ab087 100644 ---- a/pkg/loki/loki.go -+++ b/pkg/loki/loki.go -@@ -280,14 +280,14 @@ func (c *Config) Validate() error { - // - If only the querier:engine:timeout was explicitly configured, warn the user and use it everywhere. - func AdjustForTimeoutsMigration(c *Config) error { - engineTimeoutIsDefault := c.Querier.Engine.Timeout == logql.DefaultEngineTimeout -- perTenantTimeoutIsDefault := c.LimitsConfig.QueryTimeout.String() == validation.DefaultPerTenantQueryTimeout -- if engineTimeoutIsDefault && perTenantTimeoutIsDefault { -+ globalTimeoutIsDefault := c.LimitsConfig.QueryTimeout.String() == validation.DefaultPerTenantQueryTimeout -+ if engineTimeoutIsDefault && globalTimeoutIsDefault { - if err := c.LimitsConfig.QueryTimeout.Set(c.Querier.Engine.Timeout.String()); err != nil { -- return fmt.Errorf(""couldn't set per-tenant query_timeout as the engine timeout value: %w"", err) -+ return fmt.Errorf(""couldn't set global query_timeout as the engine timeout value: %w"", err) - } - level.Warn(util_log.Logger).Log(""msg"", - fmt.Sprintf( -- ""per-tenant timeout not configured, using default engine timeout (%q). This behavior will change in the next major to always use the default per-tenant timeout (%q)."", -+ ""global timeout not configured, using default engine timeout (%q). 
This behavior will change in the next major to always use the default global timeout (%q)."", - c.Querier.Engine.Timeout.String(), - c.LimitsConfig.QueryTimeout.String(), - ), -@@ -295,10 +295,10 @@ func AdjustForTimeoutsMigration(c *Config) error { - return nil - } - -- if !perTenantTimeoutIsDefault && !engineTimeoutIsDefault { -+ if !globalTimeoutIsDefault && !engineTimeoutIsDefault { - level.Warn(util_log.Logger).Log(""msg"", - fmt.Sprintf( -- ""using configured per-tenant timeout (%q) as the default (can be overridden per-tenant in the limits_config). Configured engine timeout (%q) is deprecated and will be ignored."", -+ ""using configured global timeout (%q) as the default (can be overridden per-tenant in the limits_config). Configured engine timeout (%q) is deprecated and will be ignored."", - c.LimitsConfig.QueryTimeout.String(), - c.Querier.Engine.Timeout.String(), - ), -@@ -306,9 +306,9 @@ func AdjustForTimeoutsMigration(c *Config) error { - return nil - } - -- if perTenantTimeoutIsDefault && !engineTimeoutIsDefault { -+ if globalTimeoutIsDefault && !engineTimeoutIsDefault { - if err := c.LimitsConfig.QueryTimeout.Set(c.Querier.Engine.Timeout.String()); err != nil { -- return fmt.Errorf(""couldn't set per-tenant query_timeout as the engine timeout value: %w"", err) -+ return fmt.Errorf(""couldn't set global query_timeout as the engine timeout value: %w"", err) - } - level.Warn(util_log.Logger).Log(""msg"", - fmt.Sprintf( -diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go -index 37971bd2a0c31..4e472ef1987c0 100644 ---- a/pkg/validation/limits.go -+++ b/pkg/validation/limits.go -@@ -232,7 +232,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { - _ = l.MaxQueryRange.Set(""0s"") - f.Var(&l.MaxQueryRange, ""querier.max-query-range"", ""Limit the length of the [range] inside a range query. Default is 0 or unlimited"") - _ = l.QueryTimeout.Set(DefaultPerTenantQueryTimeout) -- f.Var(&l.QueryTimeout, ""querier.query-timeout"", ""Timeout when querying backends (ingesters or storage) during the execution of a query request. If a specific per-tenant timeout is used, this timeout is ignored."") -+ f.Var(&l.QueryTimeout, ""querier.query-timeout"", ""Timeout when querying backends (ingesters or storage) during the execution of a query request. When a specific per-tenant timeout is used, the global timeout is ignored."") - - _ = l.MaxQueryLookback.Set(""0s"") - f.Var(&l.MaxQueryLookback, ""querier.max-query-lookback"", ""Limit how far back in time series data and metadata can be queried, up until lookback duration ago. This limit is enforced in the query frontend, the querier and the ruler. If the requested time range is outside the allowed range, the request will not fail, but will be modified to only query data within the allowed time range. The default value of 0 does not set a limit."")",unknown,"Fix query_timeout warning messages (#10083) - -**What this PR does / why we need it**: -This PR fixes the warning messages that are logged in relation to the -timeouts migration. - -The messages reference the per-tenant config, but the config in question -in the `AdjustForTimeoutsMigration` function is the global -`limits_config`. This is a source of confusion for users. 
- - -**Which issue(s) this PR fixes**: -Fixes #9801" -d63268bf48b2f0d82436f6212a354e5923f3c14c,2019-04-12 10:28:53,Sergey Leleko,"update promtail-examples.md - -rewrite exist example, and add more examples with descriptions",False,"diff --git a/docs/promtail-examples.md b/docs/promtail-examples.md -index cc09dd8fd0d46..dda607add244b 100644 ---- a/docs/promtail-examples.md -+++ b/docs/promtail-examples.md -@@ -1,7 +1,10 @@ - # promtail examples - #### In this file you can see simple examples of configure promtail - --For work with 2 and more sources: -+* This example of config promtail based on original docker [config](https://github.com/grafana/loki/blob/master/cmd/promtail/promtail-docker-config.yaml) -+and show how work with 2 and more sources: -+ -+Filename for example: my-docker-config.yaml - ``` - server: - http_listen_port: 9080 -@@ -35,12 +38,23 @@ scrape_configs: - __path__: /srv/log/someone_service/*.log - - ``` --#### Description -+##### Description - Scrape_config section of config.yaml contents are various jobs for parsing your logs on current host - - `job` and `host` these are tags on which you can filter parsed logs date on Grafana later - --`__path__` it is path to directory where stored your logs. (*) -+`__path__` it is path to directory where stored your logs. -+ -+If you run promtail and this config.yaml in Docker container, don't forget use docker volumes for mapping real directories -+with log to those folders in the container. - --* - If you run promtail and this config.yaml in Docker container, you can use docker volumes for mapping real directories --with log to those folders in the container. -+* See next example of Dockerfile, who use our modified promtail config (my-docker-config.yaml) -+1) Create folder, for example `promtail`, then new folder build and in this filder conf and place there `my-docker-config.yaml`. -+2) Create new Dockerfile in root folder `promtail`, with contents -+``` -+FROM grafana/promtail:latest -+COPY build/conf /etc/promtail -+``` -+3) Create your Docker image based on original Promtail image and tag it, for example `mypromtail-image` -+3) After that you can run Docker container by this command: -+`docker run -d --name promtail --network loki_network -p 9080:9080 -v /var/log:/var/log -v /srv/log/someone_service:/srv/log/someone_service mypromtail-image -config.file=/etc/promtail/my-docker-config.yaml`",unknown,"update promtail-examples.md - -rewrite exist example, and add more examples with descriptions" -07ece2bc2a0ff098dc064049751e5043dd9dfb71,2021-01-21 14:07:35,Cyril Tovena,"Improve checkpoint series iterator. (#3193) - -* Add basic benchmark. - -Signed-off-by: Cyril Tovena - -* Improves memory usage of checkpointer series iterator. - -Signed-off-by: Cyril Tovena - -* make lint. - -Signed-off-by: Cyril Tovena - -* better size computation. - -Signed-off-by: Cyril Tovena - -* Fixes test ordering flakyness. - -Signed-off-by: Cyril Tovena ",False,"diff --git a/pkg/chunkenc/encoding_helpers.go b/pkg/chunkenc/encoding_helpers.go -index 2850c69f1d2f9..66e90dae06fee 100644 ---- a/pkg/chunkenc/encoding_helpers.go -+++ b/pkg/chunkenc/encoding_helpers.go -@@ -15,8 +15,7 @@ type encbuf struct { - func (e *encbuf) reset() { e.b = e.b[:0] } - func (e *encbuf) get() []byte { return e.b } - --func (e *encbuf) putBytes(b []byte) { e.b = append(e.b, b...) 
} --func (e *encbuf) putByte(c byte) { e.b = append(e.b, c) } -+func (e *encbuf) putByte(c byte) { e.b = append(e.b, c) } - - func (e *encbuf) putBE64int(x int) { e.putBE64(uint64(x)) } - func (e *encbuf) putUvarint(x int) { e.putUvarint64(uint64(x)) } -diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go -index d56408fcc0112..ec81e8aa99d40 100644 ---- a/pkg/chunkenc/memchunk.go -+++ b/pkg/chunkenc/memchunk.go -@@ -33,9 +33,7 @@ const ( - maxLineLength = 1024 * 1024 * 1024 - ) - --var ( -- magicNumber = uint32(0x12EE56A) --) -+var magicNumber = uint32(0x12EE56A) - - // The table gets initialized with sync.Once but may still cause a race - // with any other use of the crc32 package anywhere. Thus we initialize it -@@ -155,20 +153,36 @@ func (hb *headBlock) serialise(pool WriterPool) ([]byte, error) { - // CheckpointBytes serializes a headblock to []byte. This is used by the WAL checkpointing, - // which does not want to mutate a chunk by cutting it (otherwise risking content address changes), but - // needs to serialize/deserialize the data to disk to ensure data durability. --func (hb *headBlock) CheckpointBytes(version byte) ([]byte, error) { -- encB := BytesBufferPool.Get(1 << 10).([]byte) -+func (hb *headBlock) CheckpointBytes(version byte, b []byte) ([]byte, error) { -+ buf := bytes.NewBuffer(b[:0]) -+ err := hb.CheckpointTo(version, buf) -+ return buf.Bytes(), err -+} - -- defer func() { -- BytesBufferPool.Put(encB[:0]) -- }() -+// CheckpointSize returns the estimated size of the headblock checkpoint. -+func (hb *headBlock) CheckpointSize(version byte) int { -+ size := 1 // version -+ size += binary.MaxVarintLen32 * 2 // total entries + total size -+ size += binary.MaxVarintLen64 * 2 // mint,maxt -+ size += (binary.MaxVarintLen64 + binary.MaxVarintLen32) * len(hb.entries) // ts + len of log line. - -- buf := bytes.NewBuffer(make([]byte, 0, 1<<10)) -- eb := encbuf{b: encB} -+ for _, e := range hb.entries { -+ size += len(e.s) -+ } -+ return size -+} -+ -+// CheckpointTo serializes a headblock to a `io.Writer`. see `CheckpointBytes`. 
-+func (hb *headBlock) CheckpointTo(version byte, w io.Writer) error { -+ eb := EncodeBufferPool.Get().(*encbuf) -+ defer EncodeBufferPool.Put(eb) -+ -+ eb.reset() - - eb.putByte(version) -- _, err := buf.Write(eb.get()) -+ _, err := w.Write(eb.get()) - if err != nil { -- return nil, errors.Wrap(err, ""write headBlock version"") -+ return errors.Wrap(err, ""write headBlock version"") - } - eb.reset() - -@@ -177,27 +191,27 @@ func (hb *headBlock) CheckpointBytes(version byte) ([]byte, error) { - eb.putVarint64(hb.mint) - eb.putVarint64(hb.maxt) - -- _, err = buf.Write(eb.get()) -+ _, err = w.Write(eb.get()) - if err != nil { -- return nil, errors.Wrap(err, ""write headBlock metas"") -+ return errors.Wrap(err, ""write headBlock metas"") - } - eb.reset() - - for _, entry := range hb.entries { - eb.putVarint64(entry.t) - eb.putUvarint(len(entry.s)) -- _, err = buf.Write(eb.get()) -+ _, err = w.Write(eb.get()) - if err != nil { -- return nil, errors.Wrap(err, ""write headBlock entry ts"") -+ return errors.Wrap(err, ""write headBlock entry ts"") - } - eb.reset() - -- _, err := buf.WriteString(entry.s) -+ _, err := io.WriteString(w, entry.s) - if err != nil { -- return nil, errors.Wrap(err, ""write headblock entry line"") -+ return errors.Wrap(err, ""write headblock entry line"") - } - } -- return buf.Bytes(), nil -+ return nil - } - - func (hb *headBlock) FromCheckpoint(b []byte) error { -@@ -361,6 +375,37 @@ func (c *MemChunk) Bytes() ([]byte, error) { - return c.BytesWith(nil) - } - -+// BytesSize returns the raw size of the chunk. -+// NOTE: This does not account for the head block nor include any head block data. -+func (c *MemChunk) BytesSize() int { -+ size := 4 // magic number -+ size++ // format -+ if c.format > chunkFormatV1 { -+ size++ // chunk format v2+ has a byte for encoding. -+ } -+ -+ // blocks -+ for _, b := range c.blocks { -+ size += len(b.b) + crc32.Size // size + crc -+ -+ size += binary.MaxVarintLen32 // num entries -+ size += binary.MaxVarintLen64 // mint -+ size += binary.MaxVarintLen64 // maxt -+ size += binary.MaxVarintLen32 // offset -+ if c.format == chunkFormatV3 { -+ size += binary.MaxVarintLen32 // uncompressed size -+ } -+ size += binary.MaxVarintLen32 // len(b) -+ } -+ -+ // blockmeta -+ size += binary.MaxVarintLen32 // len blocks -+ -+ size += crc32.Size // metablock crc -+ size += 8 // metaoffset -+ return size -+} -+ - // WriteTo Implements io.WriterTo - // NOTE: Does not cut head block or include any head block data. - // For this to be the case you must call Close() first. -@@ -368,11 +413,16 @@ func (c *MemChunk) Bytes() ([]byte, error) { - // result in different content addressable chunks in storage based on the timing of when - // they were checkpointed (which would cause new blocks to be cut early). - func (c *MemChunk) WriteTo(w io.Writer) (int64, error) { -- crc32Hash := newCRC32() -+ crc32Hash := crc32HashPool.Get().(hash.Hash32) -+ defer crc32HashPool.Put(crc32Hash) -+ crc32Hash.Reset() - - offset := int64(0) - -- eb := encbuf{b: make([]byte, 0, 1<<10)} -+ eb := EncodeBufferPool.Get().(*encbuf) -+ defer EncodeBufferPool.Put(eb) -+ -+ eb.reset() - - // Write the header (magicNum + version). 
- eb.putBE32(magicNumber) -@@ -392,11 +442,13 @@ func (c *MemChunk) WriteTo(w io.Writer) (int64, error) { - for i, b := range c.blocks { - c.blocks[i].offset = int(offset) - -- eb.reset() -- eb.putBytes(b.b) -- eb.putHash(crc32Hash) -+ crc32Hash.Reset() -+ _, err := crc32Hash.Write(b.b) -+ if err != nil { -+ return offset, errors.Wrap(err, ""write block"") -+ } - -- n, err := w.Write(eb.get()) -+ n, err := w.Write(crc32Hash.Sum(b.b)) - if err != nil { - return offset, errors.Wrap(err, ""write block"") - } -@@ -439,25 +491,29 @@ func (c *MemChunk) WriteTo(w io.Writer) (int64, error) { - return offset, nil - } - --// SerializeForCheckpoint returns []bytes representing the chunk & head. This is to ensure eventually --// flushed chunks don't have different substructures depending on when they were checkpointed. -+// SerializeForCheckpointTo serialize the chunk & head into different `io.Writer` for checkpointing use. -+// This is to ensure eventually flushed chunks don't have different substructures depending on when they were checkpointed. - // In turn this allows us to maintain a more effective dedupe ratio in storage. --func (c *MemChunk) SerializeForCheckpoint(b []byte) (chk, head []byte, err error) { -- chk, err = c.BytesWith(b) -+func (c *MemChunk) SerializeForCheckpointTo(chk, head io.Writer) error { -+ _, err := c.WriteTo(chk) - if err != nil { -- return nil, nil, err -+ return err - } - - if c.head.isEmpty() { -- return chk, nil, nil -+ return nil - } - -- head, err = c.head.CheckpointBytes(c.format) -+ err = c.head.CheckpointTo(c.format, head) - if err != nil { -- return nil, nil, err -+ return err - } - -- return chk, head, nil -+ return nil -+} -+ -+func (c *MemChunk) CheckpointSize() (chunk, head int) { -+ return c.BytesSize(), c.head.CheckpointSize(c.format) - } - - func MemchunkFromCheckpoint(chk, head []byte, blockSize int, targetSize int) (*MemChunk, error) { -@@ -537,7 +593,6 @@ func (c *MemChunk) Utilization() float64 { - } - size := c.UncompressedSize() - return float64(size) / float64(blocksPerChunk*c.blockSize) -- - } - - // Append implements Chunk. 
-@@ -721,9 +776,11 @@ func (b block) Offset() int { - func (b block) Entries() int { - return b.numEntries - } -+ - func (b block) MinTime() int64 { - return b.mint - } -+ - func (b block) MaxTime() int64 { - return b.maxt - } -diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go -index dfbb270830c03..ce63a3cbea05d 100644 ---- a/pkg/chunkenc/memchunk_test.go -+++ b/pkg/chunkenc/memchunk_test.go -@@ -65,6 +65,8 @@ func TestBlocksInclusive(t *testing.T) { - func TestBlock(t *testing.T) { - for _, enc := range testEncoding { - t.Run(enc.String(), func(t *testing.T) { -+ t.Parallel() -+ - chk := NewMemChunk(enc, testBlockSize, testTargetSize) - cases := []struct { - ts int64 -@@ -173,6 +175,8 @@ func TestBlock(t *testing.T) { - } - - func TestReadFormatV1(t *testing.T) { -+ t.Parallel() -+ - c := NewMemChunk(EncGZIP, testBlockSize, testTargetSize) - fillChunk(c) - // overrides default v2 format -@@ -211,6 +215,8 @@ func TestRoundtripV2(t *testing.T) { - for _, enc := range testEncoding { - for _, version := range []byte{chunkFormatV2, chunkFormatV3} { - t.Run(enc.String(), func(t *testing.T) { -+ t.Parallel() -+ - c := NewMemChunk(enc, testBlockSize, testTargetSize) - c.format = version - populated := fillChunk(c) -@@ -258,14 +264,14 @@ func TestRoundtripV2(t *testing.T) { - assertLines(loaded) - }) - } -- - } - } - - func TestRoundtripV3(t *testing.T) { -- - for _, enc := range testEncoding { - t.Run(enc.String(), func(t *testing.T) { -+ t.Parallel() -+ - c := NewMemChunk(enc, testBlockSize, testTargetSize) - c.format = chunkFormatV3 - _ = fillChunk(c) -@@ -281,15 +287,15 @@ func TestRoundtripV3(t *testing.T) { - r.head.clear() - - require.Equal(t, c, r) -- - }) - } -- - } - - func TestSerialization(t *testing.T) { - for _, enc := range testEncoding { - t.Run(enc.String(), func(t *testing.T) { -+ t.Parallel() -+ - chk := NewMemChunk(enc, testBlockSize, testTargetSize) - - numSamples := 50000 -@@ -337,6 +343,8 @@ func TestSerialization(t *testing.T) { - func TestChunkFilling(t *testing.T) { - for _, enc := range testEncoding { - t.Run(enc.String(), func(t *testing.T) { -+ t.Parallel() -+ - chk := NewMemChunk(enc, testBlockSize, 0) - chk.blockSize = 1024 - -@@ -374,6 +382,8 @@ func TestChunkFilling(t *testing.T) { - } - - func TestGZIPChunkTargetSize(t *testing.T) { -+ t.Parallel() -+ - chk := NewMemChunk(EncGZIP, testBlockSize, testTargetSize) - - lineSize := 512 -@@ -420,7 +430,6 @@ func TestGZIPChunkTargetSize(t *testing.T) { - ut := chk.Utilization() - require.Greater(t, ut, 0.99) - require.Less(t, ut, 1.01) -- - } - - func TestMemChunk_AppendOutOfOrder(t *testing.T) { -@@ -467,6 +476,7 @@ func TestMemChunk_AppendOutOfOrder(t *testing.T) { - func TestChunkSize(t *testing.T) { - for _, enc := range testEncoding { - t.Run(enc.String(), func(t *testing.T) { -+ t.Parallel() - c := NewMemChunk(enc, testBlockSize, testTargetSize) - inserted := fillChunk(c) - b, err := c.Bytes() -@@ -477,7 +487,6 @@ func TestChunkSize(t *testing.T) { - t.Log(""characters "", humanize.Bytes(uint64(inserted))) - t.Log(""Ratio"", float64(inserted)/float64(len(b))) - }) -- - } - } - -@@ -508,7 +517,6 @@ func TestChunkStats(t *testing.T) { - t.Fatal(err) - } - for it.Next() { -- - } - if err := it.Close(); err != nil { - t.Fatal(err) -@@ -537,7 +545,6 @@ func TestChunkStats(t *testing.T) { - t.Fatal(err) - } - for it.Next() { -- - } - if err := it.Close(); err != nil { - t.Fatal(err) -@@ -586,7 +593,6 @@ func TestIteratorClose(t *testing.T) { - } - test(iter, t) - } -- - }) - } - } -@@ -618,7 
+624,6 @@ func BenchmarkWrite(b *testing.B) { - result = chunks - }) - } -- - } - - type nomatchPipeline struct{} -@@ -703,10 +708,8 @@ func TestGenerateDataSize(t *testing.T) { - } - - func BenchmarkHeadBlockIterator(b *testing.B) { -- - for _, j := range []int{100000, 50000, 15000, 10000} { - b.Run(fmt.Sprintf(""Size %d"", j), func(b *testing.B) { -- - h := headBlock{} - - for i := 0; i < j; i++ { -@@ -729,10 +732,8 @@ func BenchmarkHeadBlockIterator(b *testing.B) { - } - - func BenchmarkHeadBlockSampleIterator(b *testing.B) { -- - for _, j := range []int{100000, 50000, 15000, 10000} { - b.Run(fmt.Sprintf(""Size %d"", j), func(b *testing.B) { -- - h := headBlock{} - - for i := 0; i < j; i++ { -@@ -755,8 +756,7 @@ func BenchmarkHeadBlockSampleIterator(b *testing.B) { - } - - func TestMemChunk_IteratorBounds(t *testing.T) { -- -- var createChunk = func() *MemChunk { -+ createChunk := func() *MemChunk { - t.Helper() - c := NewMemChunk(EncNone, 1e6, 1e6) - -@@ -799,6 +799,8 @@ func TestMemChunk_IteratorBounds(t *testing.T) { - t.Run( - fmt.Sprintf(""mint:%d,maxt:%d,direction:%s"", tt.mint.UnixNano(), tt.maxt.UnixNano(), tt.direction), - func(t *testing.T) { -+ t.Parallel() -+ - tt := tt - c := createChunk() - -@@ -819,14 +821,14 @@ func TestMemChunk_IteratorBounds(t *testing.T) { - } - require.NoError(t, it.Close()) - }) -- - } -- - } - - func TestMemchunkLongLine(t *testing.T) { - for _, enc := range testEncoding { - t.Run(enc.String(), func(t *testing.T) { -+ t.Parallel() -+ - c := NewMemChunk(enc, testBlockSize, testTargetSize) - for i := 1; i <= 10; i++ { - require.NoError(t, c.Append(&logproto.Entry{Timestamp: time.Unix(0, int64(i)), Line: strings.Repeat(""e"", 200000)})) -@@ -843,6 +845,8 @@ func TestMemchunkLongLine(t *testing.T) { - - // Ensure passing a reusable []byte doesn't affect output - func TestBytesWith(t *testing.T) { -+ t.Parallel() -+ - exp, err := NewMemChunk(EncNone, testBlockSize, testTargetSize).BytesWith(nil) - require.Nil(t, err) - out, err := NewMemChunk(EncNone, testBlockSize, testTargetSize).BytesWith([]byte{1, 2, 3}) -@@ -852,6 +856,8 @@ func TestBytesWith(t *testing.T) { - } - - func TestHeadBlockCheckpointing(t *testing.T) { -+ t.Parallel() -+ - c := NewMemChunk(EncSnappy, 256*1024, 1500*1024) - - // add a few entries -@@ -867,7 +873,7 @@ func TestHeadBlockCheckpointing(t *testing.T) { - // ensure blocks are not cut - require.Equal(t, 0, len(c.blocks)) - -- b, err := c.head.CheckpointBytes(c.format) -+ b, err := c.head.CheckpointBytes(c.format, nil) - require.Nil(t, err) - - hb := &headBlock{} -@@ -876,6 +882,8 @@ func TestHeadBlockCheckpointing(t *testing.T) { - } - - func TestCheckpointEncoding(t *testing.T) { -+ t.Parallel() -+ - blockSize, targetSize := 256*1024, 1500*1024 - c := NewMemChunk(EncSnappy, blockSize, targetSize) - -@@ -905,10 +913,11 @@ func TestCheckpointEncoding(t *testing.T) { - // ensure new blocks are not cut - require.Equal(t, 1, len(c.blocks)) - -- chk, head, err := c.SerializeForCheckpoint(nil) -+ var chk, head bytes.Buffer -+ err := c.SerializeForCheckpointTo(&chk, &head) - require.Nil(t, err) - -- cpy, err := MemchunkFromCheckpoint(chk, head, blockSize, targetSize) -+ cpy, err := MemchunkFromCheckpoint(chk.Bytes(), head.Bytes(), blockSize, targetSize) - require.Nil(t, err) - - // TODO(owen-d): remove once v3+ is the default chunk version -@@ -921,8 +930,10 @@ func TestCheckpointEncoding(t *testing.T) { - require.Equal(t, c, cpy) - } - --var streams = []logproto.Stream{} --var series = []logproto.Series{} -+var ( -+ streams = 
[]logproto.Stream{} -+ series = []logproto.Series{} -+) - - func BenchmarkBufferedIteratorLabels(b *testing.B) { - c := NewMemChunk(EncSnappy, testBlockSize, testTargetSize) -diff --git a/pkg/chunkenc/pool.go b/pkg/chunkenc/pool.go -index 8d37e2ffcee8a..49c6147402783 100644 ---- a/pkg/chunkenc/pool.go -+++ b/pkg/chunkenc/pool.go -@@ -47,14 +47,32 @@ var ( - New: func() interface{} { return bufio.NewReader(nil) }, - }, - } -+ - // BytesBufferPool is a bytes buffer used for lines decompressed. - // Buckets [0.5KB,1KB,2KB,4KB,8KB] -- BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) }) -+ BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) }) -+ -+ // Pool of crc32 hash -+ crc32HashPool = sync.Pool{ -+ New: func() interface{} { -+ return newCRC32() -+ }, -+ } -+ - serializeBytesBufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, - } -+ -+ // EncodeBufferPool is a pool used to binary encode. -+ EncodeBufferPool = sync.Pool{ -+ New: func() interface{} { -+ return &encbuf{ -+ b: make([]byte, 0, 256), -+ } -+ }, -+ } - ) - - func getWriterPool(enc Encoding) WriterPool { -diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go -index 31a8f29163c9c..9a04667a17821 100644 ---- a/pkg/ingester/checkpoint.go -+++ b/pkg/ingester/checkpoint.go -@@ -1,6 +1,7 @@ - package ingester - - import ( -+ ""bytes"" - fmt ""fmt"" - ""io/ioutil"" - ""os"" -@@ -16,44 +17,73 @@ import ( - ""github.com/go-kit/kit/log/level"" - ""github.com/gogo/protobuf/proto"" - ""github.com/pkg/errors"" -- ""github.com/prometheus/prometheus/pkg/pool"" -+ prompool ""github.com/prometheus/prometheus/pkg/pool"" - tsdb_errors ""github.com/prometheus/prometheus/tsdb/errors"" - ""github.com/prometheus/prometheus/tsdb/fileutil"" - ""github.com/prometheus/prometheus/tsdb/wal"" - - ""github.com/grafana/loki/pkg/chunkenc"" -+ ""github.com/grafana/loki/pkg/util/pool"" - ) - -+var ( -+ // todo(ctovena) those pools should be in factor of the actual configuration (blocksize, targetsize). -+ // Starting with something sane first then we can refine with more experience. -+ -+ // Buckets [1KB 2KB 4KB 16KB 32KB to 4MB] by 2 -+ blocksBufferPool = pool.NewBuffer(1024, 4*1024*1024, 2) -+ // Buckets [64B 128B 256B 512B... to 2MB] by 2 -+ headBufferPool = pool.NewBuffer(64, 2*1024*1024, 2) -+) -+ -+type chunkWithBuffer struct { -+ blocks, head *bytes.Buffer -+ Chunk -+} -+ - // The passed wireChunks slice is for re-use. --func toWireChunks(descs []chunkDesc, wireChunks []Chunk) ([]Chunk, error) { -+func toWireChunks(descs []chunkDesc, wireChunks []chunkWithBuffer) ([]chunkWithBuffer, error) { -+ // release memory from previous list of chunks. 
-+ for _, wc := range wireChunks { -+ blocksBufferPool.Put(wc.blocks) -+ headBufferPool.Put(wc.head) -+ wc.Data = nil -+ wc.Head = nil -+ } -+ - if cap(wireChunks) < len(descs) { -- wireChunks = make([]Chunk, len(descs)) -+ wireChunks = make([]chunkWithBuffer, len(descs)) - } else { - wireChunks = wireChunks[:len(descs)] - } -+ - for i, d := range descs { - from, to := d.chunk.Bounds() -- wireChunk := Chunk{ -- From: from, -- To: to, -- Closed: d.closed, -- FlushedAt: d.flushed, -- LastUpdated: d.lastUpdated, -- Synced: d.synced, -- } -- -- slice := wireChunks[i].Data[:0] // try to re-use the memory from last time -- if cap(slice) < d.chunk.CompressedSize() { -- slice = make([]byte, 0, d.chunk.CompressedSize()) -+ chunkSize, headSize := d.chunk.CheckpointSize() -+ -+ wireChunk := chunkWithBuffer{ -+ Chunk: Chunk{ -+ From: from, -+ To: to, -+ Closed: d.closed, -+ FlushedAt: d.flushed, -+ LastUpdated: d.lastUpdated, -+ Synced: d.synced, -+ }, -+ blocks: blocksBufferPool.Get(chunkSize), -+ head: headBufferPool.Get(headSize), - } - -- chk, head, err := d.chunk.SerializeForCheckpoint(slice) -+ err := d.chunk.SerializeForCheckpointTo( -+ wireChunk.blocks, -+ wireChunk.head, -+ ) - if err != nil { - return nil, err - } - -- wireChunk.Data = chk -- wireChunk.Head = head -+ wireChunk.Data = wireChunk.blocks.Bytes() -+ wireChunk.Head = wireChunk.head.Bytes() - wireChunks[i] = wireChunk - } - return wireChunks, nil -@@ -118,7 +148,7 @@ type SeriesWithErr struct { - - type SeriesIter interface { - Count() int -- Iter() <-chan *SeriesWithErr -+ Iter() *streamIterator - Stop() - } - -@@ -150,56 +180,108 @@ func (i *ingesterSeriesIter) Stop() { - close(i.done) - } - --func (i *ingesterSeriesIter) Iter() <-chan *SeriesWithErr { -- ch := make(chan *SeriesWithErr) -- go func() { -- for _, inst := range i.ing.getInstances() { -- inst.streamsMtx.RLock() -- // Need to buffer streams internally so the read lock isn't held trying to write to a blocked channel. -- streams := make([]*stream, 0, len(inst.streams)) -- inst.streamsMtx.RUnlock() -- _ = inst.forAllStreams(func(stream *stream) error { -- streams = append(streams, stream) -- return nil -- }) -- -- for _, stream := range streams { -- stream.chunkMtx.RLock() -- if len(stream.chunks) < 1 { -- stream.chunkMtx.RUnlock() -- // it's possible the stream has been flushed to storage -- // in between starting the checkpointing process and -- // checkpointing this stream. -- continue -- } -- -- // TODO(owen-d): use a pool -- // Only send chunks for checkpointing that have yet to be flushed. -- chunks, err := toWireChunks(unflushedChunks(stream.chunks), nil) -- stream.chunkMtx.RUnlock() -- -- var s *Series -- if err == nil { -- s = &Series{ -- UserID: inst.instanceID, -- Fingerprint: uint64(stream.fp), -- Labels: client.FromLabelsToLabelAdapters(stream.labels), -- Chunks: chunks, -- } -- } -- select { -- case ch <- &SeriesWithErr{ -- Err: err, -- Series: s, -- }: -- case <-i.done: -- return -- } -- } -+func (i *ingesterSeriesIter) Iter() *streamIterator { -+ return newStreamsIterator(i.ing) -+} -+ -+type streamInstance struct { -+ id string -+ streams []*stream -+} -+ -+type streamIterator struct { -+ instances []streamInstance -+ -+ current Series -+ buffer []chunkWithBuffer -+ err error -+} -+ -+// newStreamsIterator returns a new stream iterators that iterates over one instance at a time, then -+// each stream per instances. 
-+func newStreamsIterator(ing ingesterInstances) *streamIterator { -+ instances := ing.getInstances() -+ streamInstances := make([]streamInstance, len(instances)) -+ for i, inst := range ing.getInstances() { -+ inst.streamsMtx.RLock() -+ streams := make([]*stream, 0, len(inst.streams)) -+ inst.streamsMtx.RUnlock() -+ _ = inst.forAllStreams(func(s *stream) error { -+ streams = append(streams, s) -+ return nil -+ }) -+ streamInstances[i] = streamInstance{ -+ streams: streams, -+ id: inst.instanceID, - } -- close(ch) -- }() -- return ch -+ } -+ return &streamIterator{ -+ instances: streamInstances, -+ } -+} -+ -+// Next loads the next stream of the current instance. -+// If the instance is empty, it moves to the next instance until there is no more. -+// Return true if there's a next stream, each successful calls will replace the current stream. -+func (s *streamIterator) Next() bool { -+ if len(s.instances) == 0 { -+ s.instances = nil -+ return false -+ } -+ currentInstance := s.instances[0] -+ if len(currentInstance.streams) == 0 { -+ s.instances = s.instances[1:] -+ return s.Next() -+ } -+ -+ // current stream -+ stream := currentInstance.streams[0] -+ -+ // remove the first stream -+ s.instances[0].streams = s.instances[0].streams[1:] -+ -+ stream.chunkMtx.RLock() -+ defer stream.chunkMtx.RUnlock() -+ -+ if len(stream.chunks) < 1 { -+ // it's possible the stream has been flushed to storage -+ // in between starting the checkpointing process and -+ // checkpointing this stream. -+ return s.Next() -+ } -+ chunks, err := toWireChunks(stream.chunks, s.buffer) -+ if err != nil { -+ s.err = err -+ return false -+ } -+ s.buffer = chunks -+ -+ s.current.Chunks = s.current.Chunks[:0] -+ if cap(s.current.Chunks) == 0 { -+ s.current.Chunks = make([]Chunk, 0, len(chunks)) -+ } -+ -+ for _, c := range chunks { -+ s.current.Chunks = append(s.current.Chunks, c.Chunk) -+ } -+ -+ s.current.UserID = currentInstance.id -+ s.current.Fingerprint = uint64(stream.fp) -+ s.current.Labels = client.FromLabelsToLabelAdapters(stream.labels) -+ -+ return true -+} -+ -+// Err returns an errors thrown while iterating over the streams. -+func (s *streamIterator) Error() error { -+ return s.err -+} -+ -+// Stream is serializable (for checkpointing) stream of chunks. -+// NOTE: the series is re-used between successful Next calls. -+// This means you should make a copy or use the data before calling Next. 
-+func (s *streamIterator) Stream() *Series { -+ return &s.current - } - - type CheckpointWriter interface { -@@ -275,7 +357,7 @@ func (w *WALCheckpointWriter) Advance() (bool, error) { - } - - // Buckets [64KB to 256MB] by 2 --var recordBufferPool = pool.New(1<<16, 1<<28, 2, func(size int) interface{} { return make([]byte, 0, size) }) -+var recordBufferPool = prompool.New(1<<16, 1<<28, 2, func(size int) interface{} { return make([]byte, 0, size) }) - - func (w *WALCheckpointWriter) Write(s *Series) error { - size := s.Size() + 1 // +1 for header -@@ -483,11 +565,10 @@ func (c *Checkpointer) PerformCheckpoint() (err error) { - level.Info(util.Logger).Log(""msg"", ""checkpoint done"", ""time"", elapsed.String()) - c.metrics.checkpointDuration.Observe(elapsed.Seconds()) - }() -- for s := range c.iter.Iter() { -- if s.Err != nil { -- return s.Err -- } -- if err := c.writer.Write(s.Series); err != nil { -+ -+ iter := c.iter.Iter() -+ for iter.Next() { -+ if err := c.writer.Write(iter.Stream()); err != nil { - return err - } - -@@ -508,6 +589,10 @@ func (c *Checkpointer) PerformCheckpoint() (err error) { - - } - -+ if iter.Error() != nil { -+ return iter.Error() -+ } -+ - return c.writer.Close(false) - } - -diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go -index e7b7a268941ad..a0ccc84b40cc9 100644 ---- a/pkg/ingester/checkpoint_test.go -+++ b/pkg/ingester/checkpoint_test.go -@@ -5,6 +5,7 @@ import ( - fmt ""fmt"" - ""io/ioutil"" - ""os"" -+ ""sort"" - ""testing"" - ""time"" - -@@ -12,12 +13,14 @@ import ( - cortex_client ""github.com/cortexproject/cortex/pkg/ingester/client"" - ""github.com/cortexproject/cortex/pkg/util/services"" - ""github.com/prometheus/prometheus/pkg/labels"" -+ ""github.com/stretchr/testify/assert"" - ""github.com/stretchr/testify/require"" - ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/chunkenc"" - ""github.com/grafana/loki/pkg/ingester/client"" - ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logql/log"" - ""github.com/grafana/loki/pkg/util/validation"" - ) - -@@ -254,6 +257,150 @@ func expectCheckpoint(t *testing.T, walDir string, shouldExist bool) { - require.True(t, found == shouldExist) - } - -+type ingesterInstancesFunc func() []*instance -+ -+func (i ingesterInstancesFunc) getInstances() []*instance { -+ return i() -+} -+ -+var currentSeries *Series -+ -+func buildStreams() []logproto.Stream { -+ streams := make([]logproto.Stream, 10) -+ for i := range streams { -+ labels := makeRandomLabels().String() -+ entries := make([]logproto.Entry, 15*1e3) -+ for j := range entries { -+ entries[j] = logproto.Entry{ -+ Timestamp: time.Unix(0, int64(j)), -+ Line: fmt.Sprintf(""entry for line %d"", j), -+ } -+ } -+ streams[i] = logproto.Stream{ -+ Labels: labels, -+ Entries: entries, -+ } -+ } -+ return streams -+} -+ -+var ( -+ stream1 = logproto.Stream{ -+ Labels: labels.Labels{labels.Label{Name: ""stream"", Value: ""1""}}.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""1"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""2"", -+ }, -+ }, -+ } -+ stream2 = logproto.Stream{ -+ Labels: labels.Labels{labels.Label{Name: ""stream"", Value: ""2""}}.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""3"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""4"", -+ }, -+ }, -+ } -+) -+ -+func Test_SeriesIterator(t *testing.T) { -+ var instances []*instance -+ -+ limits, err := validation.NewOverrides(validation.Limits{ -+ 
MaxLocalStreamsPerUser: 1000, -+ IngestionRateMB: 1e4, -+ IngestionBurstSizeMB: 1e4, -+ }, nil) -+ require.NoError(t, err) -+ limiter := NewLimiter(limits, &ringCountMock{count: 1}, 1) -+ -+ for i := 0; i < 3; i++ { -+ inst := newInstance(defaultConfig(), fmt.Sprintf(""%d"", i), limiter, noopWAL{}, NilMetrics, nil) -+ require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream1}})) -+ require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream2}})) -+ instances = append(instances, inst) -+ } -+ -+ iter := newStreamsIterator(ingesterInstancesFunc(func() []*instance { -+ return instances -+ })) -+ -+ for i := 0; i < 3; i++ { -+ var streams []logproto.Stream -+ for j := 0; j < 2; j++ { -+ iter.Next() -+ assert.Equal(t, fmt.Sprintf(""%d"", i), iter.Stream().UserID) -+ memchunk, err := chunkenc.MemchunkFromCheckpoint(iter.Stream().Chunks[0].Data, iter.Stream().Chunks[0].Head, 0, 0) -+ require.NoError(t, err) -+ it, err := memchunk.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, log.NewNoopPipeline().ForStream(nil)) -+ require.NoError(t, err) -+ stream := logproto.Stream{ -+ Labels: cortex_client.FromLabelAdaptersToLabels(iter.Stream().Labels).String(), -+ } -+ for it.Next() { -+ stream.Entries = append(stream.Entries, it.Entry()) -+ } -+ require.NoError(t, it.Close()) -+ streams = append(streams, stream) -+ } -+ sort.Slice(streams, func(i, j int) bool { return streams[i].Labels < streams[j].Labels }) -+ require.Equal(t, stream1, streams[0]) -+ require.Equal(t, stream2, streams[1]) -+ } -+ -+ require.False(t, iter.Next()) -+ require.Nil(t, iter.Error()) -+} -+ -+func Benchmark_SeriesIterator(b *testing.B) { -+ streams := buildStreams() -+ instances := make([]*instance, 10) -+ -+ limits, err := validation.NewOverrides(validation.Limits{ -+ MaxLocalStreamsPerUser: 1000, -+ IngestionRateMB: 1e4, -+ IngestionBurstSizeMB: 1e4, -+ }, nil) -+ require.NoError(b, err) -+ limiter := NewLimiter(limits, &ringCountMock{count: 1}, 1) -+ -+ for i := range instances { -+ inst := newInstance(defaultConfig(), fmt.Sprintf(""instance %d"", i), limiter, noopWAL{}, NilMetrics, nil) -+ -+ require.NoError(b, -+ inst.Push(context.Background(), &logproto.PushRequest{ -+ Streams: streams, -+ }), -+ ) -+ instances[i] = inst -+ } -+ it := newIngesterSeriesIter(ingesterInstancesFunc(func() []*instance { -+ return instances -+ })) -+ defer it.Stop() -+ -+ b.ResetTimer() -+ b.ReportAllocs() -+ -+ for n := 0; n < b.N; n++ { -+ iter := it.Iter() -+ for iter.Next() { -+ currentSeries = iter.Stream() -+ } -+ require.NoError(b, iter.Error()) -+ } -+} -+ - type noOpWalLogger struct{} - - func (noOpWalLogger) Log(recs ...[]byte) error { return nil } -@@ -282,6 +429,7 @@ func Benchmark_CheckpointWrite(b *testing.B) { - - func buildChunks(t testing.TB, size int) []Chunk { - descs := make([]chunkDesc, 0, size) -+ chks := make([]Chunk, size) - - for i := 0; i < size; i++ { - // build chunks of 256k blocks, 1.5MB target size. Same as default config. 
-@@ -294,5 +442,8 @@ func buildChunks(t testing.TB, size int) []Chunk { - - there, err := toWireChunks(descs, nil) - require.NoError(t, err) -- return there -+ for i := range there { -+ chks[i] = there[i].Chunk -+ } -+ return chks - } -diff --git a/pkg/ingester/encoding_test.go b/pkg/ingester/encoding_test.go -index 6be331c16ae38..4762873657c68 100644 ---- a/pkg/ingester/encoding_test.go -+++ b/pkg/ingester/encoding_test.go -@@ -164,7 +164,11 @@ func Test_EncodingChunks(t *testing.T) { - } - there, err := toWireChunks(from, nil) - require.Nil(t, err) -- backAgain, err := fromWireChunks(conf, there) -+ chunks := make([]Chunk, 0, len(there)) -+ for _, c := range there { -+ chunks = append(chunks, c.Chunk) -+ } -+ backAgain, err := fromWireChunks(conf, chunks) - require.Nil(t, err) - - for i, to := range backAgain { -diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go -index 3255f50f79533..2210f5d2e4418 100644 ---- a/pkg/ingester/flush_test.go -+++ b/pkg/ingester/flush_test.go -@@ -38,7 +38,7 @@ const ( - ) - - func init() { -- //util.Logger = log.NewLogfmtLogger(os.Stdout) -+ // util.Logger = log.NewLogfmtLogger(os.Stdout) - } - - func TestChunkFlushingIdle(t *testing.T) { -diff --git a/pkg/util/pool/bytesbuffer.go b/pkg/util/pool/bytesbuffer.go -new file mode 100644 -index 0000000000000..5e1da7b8bb366 ---- /dev/null -+++ b/pkg/util/pool/bytesbuffer.go -@@ -0,0 +1,69 @@ -+package pool -+ -+import ( -+ ""bytes"" -+ ""sync"" -+) -+ -+// BufferPool is a bucketed pool for variably bytes buffers. -+type BufferPool struct { -+ buckets []sync.Pool -+ sizes []int -+} -+ -+// NewBuffer a new Pool with size buckets for minSize to maxSize -+// increasing by the given factor. -+func NewBuffer(minSize, maxSize int, factor float64) *BufferPool { -+ if minSize < 1 { -+ panic(""invalid minimum pool size"") -+ } -+ if maxSize < 1 { -+ panic(""invalid maximum pool size"") -+ } -+ if factor < 1 { -+ panic(""invalid factor"") -+ } -+ -+ var sizes []int -+ -+ for s := minSize; s <= maxSize; s = int(float64(s) * factor) { -+ sizes = append(sizes, s) -+ } -+ -+ return &BufferPool{ -+ buckets: make([]sync.Pool, len(sizes)), -+ sizes: sizes, -+ } -+} -+ -+// Get returns a byte buffer that fits the given size. -+func (p *BufferPool) Get(sz int) *bytes.Buffer { -+ for i, bktSize := range p.sizes { -+ if sz > bktSize { -+ continue -+ } -+ b := p.buckets[i].Get() -+ if b == nil { -+ b = bytes.NewBuffer(make([]byte, bktSize)) -+ } -+ buf := b.(*bytes.Buffer) -+ buf.Reset() -+ return b.(*bytes.Buffer) -+ } -+ return bytes.NewBuffer(make([]byte, sz)) -+} -+ -+// Put adds a byte buffer to the right bucket in the pool. -+func (p *BufferPool) Put(s *bytes.Buffer) { -+ if s == nil { -+ return -+ } -+ cap := s.Cap() -+ for i, size := range p.sizes { -+ if cap > size { -+ continue -+ } -+ p.buckets[i].Put(s) -+ return -+ } -+}",unknown,"Improve checkpoint series iterator. (#3193) - -* Add basic benchmark. - -Signed-off-by: Cyril Tovena - -* Improves memory usage of checkpointer series iterator. - -Signed-off-by: Cyril Tovena - -* make lint. - -Signed-off-by: Cyril Tovena - -* better size computation. - -Signed-off-by: Cyril Tovena - -* Fixes test ordering flakyness. 
- -Signed-off-by: Cyril Tovena " -7713ff78e2627b835d1ad73afb3695d25943ebf1,2022-01-25 18:30:23,Kaviraj Kanagaraj,"Fix `cortexpb` -> `logproto` rename in some tests (#5231) - -* Fix `cortexpg` -> `logproto` rename in some tests - -Signed-off-by: Kaviraj - -* Fix TimestampMs rename - -Signed-off-by: Kaviraj - -* queryrange test - -Signed-off-by: Kaviraj - -* Fix tests on queryrangebase pacakage\ - -Signed-off-by: Kaviraj - -* Fix some typos - -Signed-off-by: Kaviraj ",False,"diff --git a/pkg/querier/base/active-query-tracker/queries.active b/pkg/querier/base/active-query-tracker/queries.active -new file mode 100644 -index 0000000000000..8bfef0eabd496 -Binary files /dev/null and b/pkg/querier/base/active-query-tracker/queries.active differ -diff --git a/pkg/querier/queryrange/queryrangebase/marshaling_test.go b/pkg/querier/queryrange/queryrangebase/marshaling_test.go -index 955735bcefd82..0d936b433d12e 100644 ---- a/pkg/querier/queryrange/queryrangebase/marshaling_test.go -+++ b/pkg/querier/queryrange/queryrangebase/marshaling_test.go -@@ -8,8 +8,9 @@ import ( - ""net/http"" - ""testing"" - -- ""github.com/cortexproject/cortex/pkg/cortexpb"" - ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - func BenchmarkPrometheusCodec_DecodeResponse(b *testing.B) { -@@ -59,16 +60,16 @@ func mockPrometheusResponse(numSeries, numSamplesPerSeries int) *PrometheusRespo - stream := make([]SampleStream, numSeries) - for s := 0; s < numSeries; s++ { - // Generate random samples. -- samples := make([]cortexpb.Sample, numSamplesPerSeries) -+ samples := make([]logproto.Sample, numSamplesPerSeries) - for i := 0; i < numSamplesPerSeries; i++ { -- samples[i] = cortexpb.Sample{ -- Value: rand.Float64(), -- TimestampMs: int64(i), -+ samples[i] = logproto.Sample{ -+ Value: rand.Float64(), -+ Timestamp: int64(i), - } - } - - // Generate random labels. 
-- lbls := make([]cortexpb.LabelAdapter, 10) -+ lbls := make([]logproto.LabelAdapter, 10) - for i := range lbls { - lbls[i].Name = ""a_medium_size_label_name"" - lbls[i].Value = ""a_medium_size_label_value_that_is_used_to_benchmark_marshalling"" -diff --git a/pkg/querier/queryrange/queryrangebase/query_range_test.go b/pkg/querier/queryrange/queryrangebase/query_range_test.go -index 09c55ffd77f9c..81b7f81595c1d 100644 ---- a/pkg/querier/queryrange/queryrangebase/query_range_test.go -+++ b/pkg/querier/queryrange/queryrangebase/query_range_test.go -@@ -8,12 +8,13 @@ import ( - ""strconv"" - ""testing"" - -- ""github.com/cortexproject/cortex/pkg/cortexpb"" - jsoniter ""github.com/json-iterator/go"" - ""github.com/stretchr/testify/assert"" - ""github.com/stretchr/testify/require"" - ""github.com/weaveworks/common/httpgrpc"" - ""github.com/weaveworks/common/user"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - func TestRequest(t *testing.T) { -@@ -185,10 +186,10 @@ func TestMergeAPIResponses(t *testing.T) { - ResultType: matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{}, -- Samples: []cortexpb.Sample{ -- {Value: 0, TimestampMs: 0}, -- {Value: 1, TimestampMs: 1}, -+ Labels: []logproto.LabelAdapter{}, -+ Samples: []logproto.Sample{ -+ {Value: 0, Timestamp: 0}, -+ {Value: 1, Timestamp: 1}, - }, - }, - }, -@@ -199,10 +200,10 @@ func TestMergeAPIResponses(t *testing.T) { - ResultType: matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{}, -- Samples: []cortexpb.Sample{ -- {Value: 2, TimestampMs: 2}, -- {Value: 3, TimestampMs: 3}, -+ Labels: []logproto.LabelAdapter{}, -+ Samples: []logproto.Sample{ -+ {Value: 2, Timestamp: 2}, -+ {Value: 3, Timestamp: 3}, - }, - }, - }, -@@ -215,12 +216,12 @@ func TestMergeAPIResponses(t *testing.T) { - ResultType: matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{}, -- Samples: []cortexpb.Sample{ -- {Value: 0, TimestampMs: 0}, -- {Value: 1, TimestampMs: 1}, -- {Value: 2, TimestampMs: 2}, -- {Value: 3, TimestampMs: 3}, -+ Labels: []logproto.LabelAdapter{}, -+ Samples: []logproto.Sample{ -+ {Value: 0, Timestamp: 0}, -+ {Value: 1, Timestamp: 1}, -+ {Value: 2, Timestamp: 2}, -+ {Value: 3, Timestamp: 3}, - }, - }, - }, -@@ -240,12 +241,12 @@ func TestMergeAPIResponses(t *testing.T) { - ResultType: matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -- Samples: []cortexpb.Sample{ -- {Value: 0, TimestampMs: 0}, -- {Value: 1, TimestampMs: 1000}, -- {Value: 2, TimestampMs: 2000}, -- {Value: 3, TimestampMs: 3000}, -+ Labels: []logproto.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []logproto.Sample{ -+ {Value: 0, Timestamp: 0}, -+ {Value: 1, Timestamp: 1000}, -+ {Value: 2, Timestamp: 2000}, -+ {Value: 3, Timestamp: 3000}, - }, - }, - }, -@@ -265,11 +266,11 @@ func TestMergeAPIResponses(t *testing.T) { - ResultType: matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -- Samples: []cortexpb.Sample{ -- {Value: 1, TimestampMs: 1000}, -- {Value: 2, TimestampMs: 2000}, -- {Value: 3, TimestampMs: 3000}, -+ Labels: []logproto.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []logproto.Sample{ -+ {Value: 1, Timestamp: 1000}, -+ {Value: 2, Timestamp: 2000}, -+ {Value: 3, Timestamp: 3000}, - }, - }, - }, -@@ -288,13 +289,13 @@ func TestMergeAPIResponses(t *testing.T) { - ResultType: 
matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -- Samples: []cortexpb.Sample{ -- {Value: 1, TimestampMs: 1000}, -- {Value: 2, TimestampMs: 2000}, -- {Value: 3, TimestampMs: 3000}, -- {Value: 4, TimestampMs: 4000}, -- {Value: 5, TimestampMs: 5000}, -+ Labels: []logproto.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []logproto.Sample{ -+ {Value: 1, Timestamp: 1000}, -+ {Value: 2, Timestamp: 2000}, -+ {Value: 3, Timestamp: 3000}, -+ {Value: 4, Timestamp: 4000}, -+ {Value: 5, Timestamp: 5000}, - }, - }, - }, -@@ -313,12 +314,12 @@ func TestMergeAPIResponses(t *testing.T) { - ResultType: matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -- Samples: []cortexpb.Sample{ -- {Value: 2, TimestampMs: 2000}, -- {Value: 3, TimestampMs: 3000}, -- {Value: 4, TimestampMs: 4000}, -- {Value: 5, TimestampMs: 5000}, -+ Labels: []logproto.LabelAdapter{{Name: ""a"", Value: ""b""}, {Name: ""c"", Value: ""d""}}, -+ Samples: []logproto.Sample{ -+ {Value: 2, Timestamp: 2000}, -+ {Value: 3, Timestamp: 3000}, -+ {Value: 4, Timestamp: 4000}, -+ {Value: 5, Timestamp: 5000}, - }, - }, - }, -diff --git a/pkg/querier/queryrange/queryrangebase/queryable_test.go b/pkg/querier/queryrange/queryrangebase/queryable_test.go -index ed1de014bfc63..b0193a86c4a78 100644 ---- a/pkg/querier/queryrange/queryrangebase/queryable_test.go -+++ b/pkg/querier/queryrange/queryrangebase/queryable_test.go -@@ -4,12 +4,12 @@ import ( - ""context"" - ""testing"" - -- ""github.com/cortexproject/cortex/pkg/cortexpb"" - ""github.com/pkg/errors"" - ""github.com/prometheus/prometheus/model/labels"" - ""github.com/prometheus/prometheus/promql/parser"" - ""github.com/stretchr/testify/require"" - -+ ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/querier/astmapper"" - ) - -@@ -91,34 +91,34 @@ func TestSelect(t *testing.T) { - ResultType: string(parser.ValueTypeVector), - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 1, -- TimestampMs: 1, -+ Value: 1, -+ Timestamp: 1, - }, - { -- Value: 2, -- TimestampMs: 2, -+ Value: 2, -+ Timestamp: 2, - }, - }, - }, - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 8, -- TimestampMs: 1, -+ Value: 8, -+ Timestamp: 1, - }, - { -- Value: 9, -- TimestampMs: 2, -+ Value: 9, -+ Timestamp: 2, - }, - }, - }, -@@ -141,34 +141,34 @@ func TestSelect(t *testing.T) { - t, - NewSeriesSet([]SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 1, -- TimestampMs: 1, -+ Value: 1, -+ Timestamp: 1, - }, - { -- Value: 2, -- TimestampMs: 2, -+ Value: 2, -+ Timestamp: 2, - }, - }, - }, - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 8, -- TimestampMs: 1, -+ Value: 8, -+ Timestamp: 1, - }, - { -- Value: 9, 
-- TimestampMs: 2, -+ Value: 9, -+ Timestamp: 2, - }, - }, - }, -@@ -219,13 +219,13 @@ func TestSelectConcurrent(t *testing.T) { - ResultType: string(parser.ValueTypeVector), - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 1, -- TimestampMs: 1, -+ Value: 1, -+ Timestamp: 1, - }, - }, - }, -diff --git a/pkg/querier/queryrange/queryrangebase/querysharding_test.go b/pkg/querier/queryrange/queryrangebase/querysharding_test.go -index b32a8351c79e8..3617ce70d0d1b 100644 ---- a/pkg/querier/queryrange/queryrangebase/querysharding_test.go -+++ b/pkg/querier/queryrange/queryrangebase/querysharding_test.go -@@ -8,7 +8,6 @@ import ( - ""testing"" - ""time"" - -- ""github.com/cortexproject/cortex/pkg/cortexpb"" - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/go-kit/log"" - ""github.com/pkg/errors"" -@@ -18,6 +17,7 @@ import ( - ""github.com/prometheus/prometheus/storage"" - ""github.com/stretchr/testify/require"" - -+ ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/storage/chunk"" - ) - -@@ -122,34 +122,34 @@ func sampleMatrixResponse() *PrometheusResponse { - ResultType: string(parser.ValueTypeMatrix), - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- TimestampMs: 5, -- Value: 1, -+ Timestamp: 5, -+ Value: 1, - }, - { -- TimestampMs: 10, -- Value: 2, -+ Timestamp: 10, -+ Value: 2, - }, - }, - }, - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- TimestampMs: 5, -- Value: 8, -+ Timestamp: 5, -+ Value: 8, - }, - { -- TimestampMs: 10, -- Value: 9, -+ Timestamp: 10, -+ Value: 9, - }, - }, - }, -diff --git a/pkg/querier/queryrange/queryrangebase/results_cache_test.go b/pkg/querier/queryrange/queryrangebase/results_cache_test.go -index 75756b3b06d4e..8296c6b978303 100644 ---- a/pkg/querier/queryrange/queryrangebase/results_cache_test.go -+++ b/pkg/querier/queryrange/queryrangebase/results_cache_test.go -@@ -7,7 +7,6 @@ import ( - ""testing"" - ""time"" - -- ""github.com/cortexproject/cortex/pkg/cortexpb"" - ""github.com/go-kit/log"" - ""github.com/gogo/protobuf/types"" - ""github.com/grafana/dskit/flagext"" -@@ -16,6 +15,7 @@ import ( - ""github.com/stretchr/testify/require"" - ""github.com/weaveworks/common/user"" - -+ ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/storage/chunk/cache"" - ) - -@@ -58,12 +58,12 @@ var ( - ResultType: model.ValMatrix.String(), - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""foo"", Value: ""bar""}, - }, -- Samples: []cortexpb.Sample{ -- {Value: 137, TimestampMs: 1536673680000}, -- {Value: 137, TimestampMs: 1536673780000}, -+ Samples: []logproto.Sample{ -+ {Value: 137, Timestamp: 1536673680000}, -+ {Value: 137, Timestamp: 1536673780000}, - }, - }, - }, -@@ -72,11 +72,11 @@ var ( - ) - - func mkAPIResponse(start, end, step int64) *PrometheusResponse { -- var samples []cortexpb.Sample -+ var samples []logproto.Sample - for i := start; i <= end; i += step { -- samples = append(samples, cortexpb.Sample{ -- TimestampMs: i, -- Value: float64(i), 
-+ samples = append(samples, logproto.Sample{ -+ Timestamp: i, -+ Value: float64(i), - }) - } - -@@ -86,7 +86,7 @@ func mkAPIResponse(start, end, step int64) *PrometheusResponse { - ResultType: matrix, - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""foo"", Value: ""bar""}, - }, - Samples: samples, -@@ -1024,10 +1024,6 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) { - } - } - --func toMs(t time.Duration) int64 { -- return int64(t / time.Millisecond) --} -- - type mockCacheGenNumberLoader struct { - } - -diff --git a/pkg/querier/queryrange/queryrangebase/series_test.go b/pkg/querier/queryrange/queryrangebase/series_test.go -index 728e0f01b7d80..539f6107360b9 100644 ---- a/pkg/querier/queryrange/queryrangebase/series_test.go -+++ b/pkg/querier/queryrange/queryrangebase/series_test.go -@@ -3,9 +3,10 @@ package queryrangebase - import ( - ""testing"" - -- ""github.com/cortexproject/cortex/pkg/cortexpb"" - ""github.com/prometheus/prometheus/promql/parser"" - ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - func Test_ResponseToSamples(t *testing.T) { -@@ -14,34 +15,34 @@ func Test_ResponseToSamples(t *testing.T) { - ResultType: string(parser.ValueTypeMatrix), - Result: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 1, -- TimestampMs: 1, -+ Value: 1, -+ Timestamp: 1, - }, - { -- Value: 2, -- TimestampMs: 2, -+ Value: 2, -+ Timestamp: 2, - }, - }, - }, - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 8, -- TimestampMs: 1, -+ Value: 8, -+ Timestamp: 1, - }, - { -- Value: 9, -- TimestampMs: 2, -+ Value: 9, -+ Timestamp: 2, - }, - }, - }, -@@ -49,9 +50,9 @@ func Test_ResponseToSamples(t *testing.T) { - }, - } - -- streams, err := ResponseToSamples(input) -+ stream, err := ResponseToSamples(input) - require.Nil(t, err) -- set := NewSeriesSet(streams) -+ set := NewSeriesSet(stream) - - setCt := 0 - -@@ -62,7 +63,7 @@ func Test_ResponseToSamples(t *testing.T) { - sampleCt := 0 - for iter.Next() { - ts, v := iter.At() -- require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].TimestampMs, ts) -+ require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].Timestamp, ts) - require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].Value, v) - sampleCt++ - } -diff --git a/pkg/querier/queryrange/queryrangebase/split_by_interval_test.go b/pkg/querier/queryrange/queryrangebase/split_by_interval_test.go -index 03c8050a9df60..f59f560f40096 100644 ---- a/pkg/querier/queryrange/queryrangebase/split_by_interval_test.go -+++ b/pkg/querier/queryrange/queryrangebase/split_by_interval_test.go -@@ -377,3 +377,7 @@ func Test_evaluateAtModifier(t *testing.T) { - }) - } - } -+ -+func toMs(t time.Duration) int64 { -+ return int64(t / time.Millisecond) -+} -diff --git a/pkg/querier/queryrange/queryrangebase/value_test.go b/pkg/querier/queryrange/queryrangebase/value_test.go -index ffaed937ba558..ca8f4759e4fc4 100644 ---- a/pkg/querier/queryrange/queryrangebase/value_test.go -+++ b/pkg/querier/queryrange/queryrangebase/value_test.go -@@ -9,7 +9,7 @@ import ( - ""github.com/prometheus/prometheus/promql"" - ""github.com/stretchr/testify/require"" - -- 
""github.com/cortexproject/cortex/pkg/cortexpb"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - func TestFromValue(t *testing.T) { -@@ -33,10 +33,10 @@ func TestFromValue(t *testing.T) { - err: false, - expected: []SampleStream{ - { -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 1, -- TimestampMs: 1, -+ Value: 1, -+ Timestamp: 1, - }, - }, - }, -@@ -65,26 +65,26 @@ func TestFromValue(t *testing.T) { - err: false, - expected: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 1, -- TimestampMs: 1, -+ Value: 1, -+ Timestamp: 1, - }, - }, - }, - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a2""}, - {Name: ""b"", Value: ""b2""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 2, -- TimestampMs: 2, -+ Value: 2, -+ Timestamp: 2, - }, - }, - }, -@@ -119,34 +119,34 @@ func TestFromValue(t *testing.T) { - err: false, - expected: []SampleStream{ - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a1""}, - {Name: ""b"", Value: ""b1""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 1, -- TimestampMs: 1, -+ Value: 1, -+ Timestamp: 1, - }, - { -- Value: 2, -- TimestampMs: 2, -+ Value: 2, -+ Timestamp: 2, - }, - }, - }, - { -- Labels: []cortexpb.LabelAdapter{ -+ Labels: []logproto.LabelAdapter{ - {Name: ""a"", Value: ""a2""}, - {Name: ""b"", Value: ""b2""}, - }, -- Samples: []cortexpb.Sample{ -+ Samples: []logproto.Sample{ - { -- Value: 8, -- TimestampMs: 1, -+ Value: 8, -+ Timestamp: 1, - }, - { -- Value: 9, -- TimestampMs: 2, -+ Value: 9, -+ Timestamp: 2, - }, - }, - },",unknown,"Fix `cortexpb` -> `logproto` rename in some tests (#5231) - -* Fix `cortexpg` -> `logproto` rename in some tests - -Signed-off-by: Kaviraj - -* Fix TimestampMs rename - -Signed-off-by: Kaviraj - -* queryrange test - -Signed-off-by: Kaviraj - -* Fix tests on queryrangebase pacakage\ - -Signed-off-by: Kaviraj - -* Fix some typos - -Signed-off-by: Kaviraj " -0bb257404029529e316f359454209ea3a72ef8bc,2024-02-13 13:05:39,Owen Diehl,"makes batchedLoader generic + removes unnecessary interfaces & adapters (#11924) - -While reviewing https://github.com/grafana/loki/pull/11919, I figured -it'd be nice to make `batchedLoader` generic so we can reuse it's logic. 
-This let me test it easier and remove a lot of now-unnecessary adapter -code (interfaces, types)",False,"diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go -index 5cece24172526..ed1f50ae72582 100644 ---- a/pkg/bloomcompactor/bloomcompactor.go -+++ b/pkg/bloomcompactor/bloomcompactor.go -@@ -90,7 +90,7 @@ func New( - c.metrics = NewMetrics(r, c.btMetrics) - - chunkLoader := NewStoreChunkLoader( -- NewFetcherProviderAdapter(fetcherProvider), -+ fetcherProvider, - c.metrics, - ) - -diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go -index d9d9c68947a73..58dd2674895ed 100644 ---- a/pkg/bloomcompactor/spec.go -+++ b/pkg/bloomcompactor/spec.go -@@ -16,6 +16,7 @@ import ( - logql_log ""github.com/grafana/loki/pkg/logql/log"" - v1 ""github.com/grafana/loki/pkg/storage/bloom/v1"" - ""github.com/grafana/loki/pkg/storage/chunk"" -+ ""github.com/grafana/loki/pkg/storage/chunk/fetcher"" - ""github.com/grafana/loki/pkg/storage/stores"" - ""github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"" - ""github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb"" -@@ -235,39 +236,13 @@ type ChunkLoader interface { - Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error) - } - --// interface modeled from `pkg/storage/stores/composite_store.ChunkFetcherProvider` --type fetcherProvider interface { -- GetChunkFetcher(model.Time) chunkFetcher --} -- --// interface modeled from `pkg/storage/chunk/fetcher.Fetcher` --type chunkFetcher interface { -- FetchChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) --} -- --// Adapter turning `stores.ChunkFetcherProvider` into `fetcherProvider` --// The former returns a concrete type and is heavily used externally --// while the latter returns an interface for better testing and --// is used internally --type FetcherProviderAdapter struct { -- root stores.ChunkFetcherProvider --} -- --func NewFetcherProviderAdapter(root stores.ChunkFetcherProvider) *FetcherProviderAdapter { -- return &FetcherProviderAdapter{root: root} --} -- --func (f *FetcherProviderAdapter) GetChunkFetcher(t model.Time) chunkFetcher { -- return f.root.GetChunkFetcher(t) --} -- - // StoreChunkLoader loads chunks from a store - type StoreChunkLoader struct { -- fetcherProvider fetcherProvider -+ fetcherProvider stores.ChunkFetcherProvider - metrics *Metrics - } - --func NewStoreChunkLoader(fetcherProvider fetcherProvider, metrics *Metrics) *StoreChunkLoader { -+func NewStoreChunkLoader(fetcherProvider stores.ChunkFetcherProvider, metrics *Metrics) *StoreChunkLoader { - return &StoreChunkLoader{ - fetcherProvider: fetcherProvider, - metrics: metrics, -@@ -278,7 +253,7 @@ func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.S - // NB(owen-d): This is probably unnecessary as we should only have one fetcher - // because we'll only be working on a single index period at a time, but this should protect - // us in the case of refactoring/changing this and likely isn't a perf bottleneck. 
-- chksByFetcher := make(map[chunkFetcher][]chunk.Chunk) -+ chksByFetcher := make(map[*fetcher.Fetcher][]chunk.Chunk) - for _, chk := range series.Chunks { - fetcher := s.fetcherProvider.GetChunkFetcher(chk.Start) - chksByFetcher[fetcher] = append(chksByFetcher[fetcher], chunk.Chunk{ -@@ -292,119 +267,152 @@ func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.S - }) - } - -- work := make([]chunkWork, 0, len(chksByFetcher)) -+ var ( -+ fetchers = make([]Fetcher[chunk.Chunk, chunk.Chunk], 0, len(chksByFetcher)) -+ inputs = make([][]chunk.Chunk, 0, len(chksByFetcher)) -+ ) - for fetcher, chks := range chksByFetcher { -- work = append(work, chunkWork{ -- fetcher: fetcher, -- chks: chks, -- }) -+ fn := FetchFunc[chunk.Chunk, chunk.Chunk](fetcher.FetchChunks) -+ fetchers = append(fetchers, fn) -+ inputs = append(inputs, chks) - } - - return &ChunkItersByFingerprint{ - fp: series.Fingerprint, -- itr: newBatchedLoader(ctx, work, batchedLoaderDefaultBatchSize, s.metrics), -+ itr: newBatchedChunkLoader(ctx, fetchers, inputs, s.metrics, batchedLoaderDefaultBatchSize), - }, nil - } - --type chunkWork struct { -- fetcher chunkFetcher -- chks []chunk.Chunk -+type Fetcher[A, B any] interface { -+ Fetch(ctx context.Context, inputs []A) ([]B, error) -+} -+ -+type FetchFunc[A, B any] func(ctx context.Context, inputs []A) ([]B, error) -+ -+func (f FetchFunc[A, B]) Fetch(ctx context.Context, inputs []A) ([]B, error) { -+ return f(ctx, inputs) - } - - // batchedLoader implements `v1.Iterator[v1.ChunkRefWithIter]` in batches - // to ensure memory is bounded while loading chunks - // TODO(owen-d): testware --type batchedLoader struct { -+type batchedLoader[A, B, C any] struct { - metrics *Metrics - batchSize int - ctx context.Context -- work []chunkWork -+ fetchers []Fetcher[A, B] -+ work [][]A - -- cur v1.ChunkRefWithIter -- batch []chunk.Chunk -- err error -+ mapper func(B) (C, error) -+ cur C -+ batch []B -+ err error - } - - const batchedLoaderDefaultBatchSize = 50 - --func newBatchedLoader(ctx context.Context, work []chunkWork, batchSize int, metrics *Metrics) *batchedLoader { -- return &batchedLoader{ -- metrics: metrics, -- batchSize: batchSize, -+func newBatchedLoader[A, B, C any]( -+ ctx context.Context, -+ fetchers []Fetcher[A, B], -+ inputs [][]A, -+ mapper func(B) (C, error), -+ batchSize int, -+) *batchedLoader[A, B, C] { -+ return &batchedLoader[A, B, C]{ -+ batchSize: max(batchSize, 1), - ctx: ctx, -- work: work, -+ fetchers: fetchers, -+ work: inputs, -+ mapper: mapper, - } - } - --func (b *batchedLoader) Next() bool { -- if len(b.batch) > 0 { -- return b.prepNext(false) -- } -+func (b *batchedLoader[A, B, C]) Next() bool { - -- if len(b.work) == 0 { -- return false -- } -+ // iterate work until we have non-zero length batch -+ for len(b.batch) == 0 { - -- // setup next batch -- next := b.work[0] -- batchSize := min(b.batchSize, len(next.chks)) -- toFetch := next.chks[:batchSize] -- // update work -- b.work[0].chks = next.chks[batchSize:] -- if len(b.work[0].chks) == 0 { -- b.work = b.work[1:] -- } -+ // empty batch + no work remaining = we're done -+ if len(b.work) == 0 { -+ return false -+ } - -- if len(toFetch) == 0 { -- return false -- } -+ // setup next batch -+ next := b.work[0] -+ batchSize := min(b.batchSize, len(next)) -+ toFetch := next[:batchSize] -+ fetcher := b.fetchers[0] -+ -+ // update work -+ b.work[0] = b.work[0][batchSize:] -+ if len(b.work[0]) == 0 { -+ // if we've exhausted work from this set of inputs, -+ // set pointer to next set of inputs -+ // 
and their respective fetcher -+ b.work = b.work[1:] -+ b.fetchers = b.fetchers[1:] -+ } - -- b.batch, b.err = next.fetcher.FetchChunks(b.ctx, toFetch) -- if b.err != nil { -- return false -+ // there was no work in this batch; continue (should not happen) -+ if len(toFetch) == 0 { -+ continue -+ } -+ -+ b.batch, b.err = fetcher.Fetch(b.ctx, toFetch) -+ // error fetching, short-circuit iteration -+ if b.err != nil { -+ return false -+ } - } - -- return b.prepNext(true) -+ return b.prepNext() - } - --func (b *batchedLoader) prepNext(checkLen bool) bool { -- if checkLen && len(b.batch) == 0 { -- return false -- } -- b.cur, b.err = b.format(b.batch[0]) -+func (b *batchedLoader[_, B, C]) prepNext() bool { -+ b.cur, b.err = b.mapper(b.batch[0]) - b.batch = b.batch[1:] - return b.err == nil - } - --func (b *batchedLoader) format(c chunk.Chunk) (v1.ChunkRefWithIter, error) { -- chk := c.Data.(*chunkenc.Facade).LokiChunk() -- b.metrics.chunkSize.Observe(float64(chk.UncompressedSize())) -- itr, err := chk.Iterator( -- b.ctx, -- time.Unix(0, 0), -- time.Unix(0, math.MaxInt64), -- logproto.FORWARD, -- logql_log.NewNoopPipeline().ForStream(c.Metric), -- ) -+func newBatchedChunkLoader( -+ ctx context.Context, -+ fetchers []Fetcher[chunk.Chunk, chunk.Chunk], -+ inputs [][]chunk.Chunk, -+ metrics *Metrics, -+ batchSize int, -+) *batchedLoader[chunk.Chunk, chunk.Chunk, v1.ChunkRefWithIter] { -+ -+ mapper := func(c chunk.Chunk) (v1.ChunkRefWithIter, error) { -+ chk := c.Data.(*chunkenc.Facade).LokiChunk() -+ metrics.chunkSize.Observe(float64(chk.UncompressedSize())) -+ itr, err := chk.Iterator( -+ ctx, -+ time.Unix(0, 0), -+ time.Unix(0, math.MaxInt64), -+ logproto.FORWARD, -+ logql_log.NewNoopPipeline().ForStream(c.Metric), -+ ) - -- if err != nil { -- return v1.ChunkRefWithIter{}, err -- } -+ if err != nil { -+ return v1.ChunkRefWithIter{}, err -+ } - -- return v1.ChunkRefWithIter{ -- Ref: v1.ChunkRef{ -- Start: c.From, -- End: c.Through, -- Checksum: c.Checksum, -- }, -- Itr: itr, -- }, nil -+ return v1.ChunkRefWithIter{ -+ Ref: v1.ChunkRef{ -+ Start: c.From, -+ End: c.Through, -+ Checksum: c.Checksum, -+ }, -+ Itr: itr, -+ }, nil -+ } -+ return newBatchedLoader(ctx, fetchers, inputs, mapper, batchSize) - } - --func (b *batchedLoader) At() v1.ChunkRefWithIter { -+func (b *batchedLoader[_, _, C]) At() C { - return b.cur - } - --func (b *batchedLoader) Err() error { -+func (b *batchedLoader[_, _, _]) Err() error { - return b.err - } -diff --git a/pkg/bloomcompactor/spec_test.go b/pkg/bloomcompactor/spec_test.go -index 798d65e2f2bcd..44b1fa26a4d1f 100644 ---- a/pkg/bloomcompactor/spec_test.go -+++ b/pkg/bloomcompactor/spec_test.go -@@ -3,6 +3,7 @@ package bloomcompactor - import ( - ""bytes"" - ""context"" -+ ""errors"" - ""testing"" - - ""github.com/go-kit/log"" -@@ -155,3 +156,129 @@ func TestSimpleBloomGenerator(t *testing.T) { - }) - } - } -+ -+func TestBatchedLoader(t *testing.T) { -+ errMapper := func(i int) (int, error) { -+ return 0, errors.New(""bzzt"") -+ } -+ successMapper := func(i int) (int, error) { -+ return i, nil -+ } -+ -+ expired, cancel := context.WithCancel(context.Background()) -+ cancel() -+ -+ for _, tc := range []struct { -+ desc string -+ ctx context.Context -+ batchSize int -+ mapper func(int) (int, error) -+ err bool -+ inputs [][]int -+ exp []int -+ }{ -+ { -+ desc: ""OneBatch"", -+ ctx: context.Background(), -+ batchSize: 2, -+ mapper: successMapper, -+ err: false, -+ inputs: [][]int{{0, 1}}, -+ exp: []int{0, 1}, -+ }, -+ { -+ desc: ""ZeroBatchSizeStillWorks"", -+ ctx: 
context.Background(), -+ batchSize: 0, -+ mapper: successMapper, -+ err: false, -+ inputs: [][]int{{0, 1}}, -+ exp: []int{0, 1}, -+ }, -+ { -+ desc: ""OneBatchLessThanFull"", -+ ctx: context.Background(), -+ batchSize: 2, -+ mapper: successMapper, -+ err: false, -+ inputs: [][]int{{0}}, -+ exp: []int{0}, -+ }, -+ { -+ desc: ""TwoBatches"", -+ ctx: context.Background(), -+ batchSize: 2, -+ mapper: successMapper, -+ err: false, -+ inputs: [][]int{{0, 1, 2, 3}}, -+ exp: []int{0, 1, 2, 3}, -+ }, -+ { -+ desc: ""MultipleBatchesMultipleLoaders"", -+ ctx: context.Background(), -+ batchSize: 2, -+ mapper: successMapper, -+ err: false, -+ inputs: [][]int{{0, 1}, {2}, {3, 4, 5}}, -+ exp: []int{0, 1, 2, 3, 4, 5}, -+ }, -+ { -+ desc: ""HandlesEmptyInputs"", -+ ctx: context.Background(), -+ batchSize: 2, -+ mapper: successMapper, -+ err: false, -+ inputs: [][]int{{0, 1, 2, 3}, nil, {4}}, -+ exp: []int{0, 1, 2, 3, 4}, -+ }, -+ { -+ desc: ""Timeout"", -+ ctx: expired, -+ batchSize: 2, -+ mapper: successMapper, -+ err: true, -+ inputs: [][]int{{0}}, -+ }, -+ { -+ desc: ""MappingFailure"", -+ ctx: context.Background(), -+ batchSize: 2, -+ mapper: errMapper, -+ err: true, -+ inputs: [][]int{{0}}, -+ }, -+ } { -+ t.Run(tc.desc, func(t *testing.T) { -+ fetchers := make([]Fetcher[int, int], 0, len(tc.inputs)) -+ for range tc.inputs { -+ fetchers = append( -+ fetchers, -+ FetchFunc[int, int](func(ctx context.Context, xs []int) ([]int, error) { -+ if ctx.Err() != nil { -+ return nil, ctx.Err() -+ } -+ return xs, nil -+ }), -+ ) -+ } -+ -+ loader := newBatchedLoader[int, int, int]( -+ tc.ctx, -+ fetchers, -+ tc.inputs, -+ tc.mapper, -+ tc.batchSize, -+ ) -+ -+ got, err := v1.Collect[int](loader) -+ if tc.err { -+ require.Error(t, err) -+ return -+ } -+ require.NoError(t, err) -+ require.Equal(t, tc.exp, got) -+ -+ }) -+ } -+ -+}",unknown,"makes batchedLoader generic + removes unnecessary interfaces & adapters (#11924) - -While reviewing https://github.com/grafana/loki/pull/11919, I figured -it'd be nice to make `batchedLoader` generic so we can reuse it's logic. -This let me test it easier and remove a lot of now-unnecessary adapter -code (interfaces, types)" -e75acd79fc48c357008a4822733133d6b1092c97,2019-08-18 20:02:20,Edward Welch,update helm and ksonnet to 0.3.0,False,"diff --git a/production/helm/loki-stack/Chart.yaml b/production/helm/loki-stack/Chart.yaml -index c1c6cf5bd780c..4c8fa7187b7c0 100644 ---- a/production/helm/loki-stack/Chart.yaml -+++ b/production/helm/loki-stack/Chart.yaml -@@ -1,6 +1,6 @@ - name: loki-stack --version: 0.15.0 --appVersion: 0.0.1 -+version: 0.16.0 -+appVersion: v0.3.0 - kubeVersion: ""^1.10.0-0"" - description: ""Loki: like Prometheus, but for logs."" - home: https://grafana.com/loki -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index 21e164747da72..a0eb66d523f84 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -1,6 +1,6 @@ - name: loki --version: 0.13.0 --appVersion: 0.0.1 -+version: 0.14.0 -+appVersion: v0.3.0 - kubeVersion: ""^1.10.0-0"" - description: ""Loki: like Prometheus, but for logs."" - home: https://grafana.com/loki -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index 264514140b3d8..4a721643d4757 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -66,7 +66,7 @@ config: - - image: - repository: grafana/loki -- tag: v0.2.0 -+ tag: v0.3.0 - pullPolicy: IfNotPresent - - ## Additional Loki container arguments, e.g. 
log level (debug, info, warn, error) -diff --git a/production/helm/promtail/Chart.yaml b/production/helm/promtail/Chart.yaml -index 1b2b63487a8f2..cbd5bb210685e 100644 ---- a/production/helm/promtail/Chart.yaml -+++ b/production/helm/promtail/Chart.yaml -@@ -1,6 +1,6 @@ - name: promtail --version: 0.11.0 --appVersion: 0.0.1 -+version: 0.12.0 -+appVersion: v0.3.0 - kubeVersion: ""^1.10.0-0"" - description: ""Responsible for gathering logs and sending them to Loki"" - home: https://grafana.com/loki -diff --git a/production/helm/promtail/values.yaml b/production/helm/promtail/values.yaml -index 2b0065f19dff6..52373505a871a 100644 ---- a/production/helm/promtail/values.yaml -+++ b/production/helm/promtail/values.yaml -@@ -8,7 +8,7 @@ deploymentStrategy: RollingUpdate - - image: - repository: grafana/promtail -- tag: v0.2.0 -+ tag: v0.3.0 - pullPolicy: IfNotPresent - - livenessProbe: {} -diff --git a/production/ksonnet/loki/images.libsonnet b/production/ksonnet/loki/images.libsonnet -index 8f76fb152b5d2..5eaec58acd677 100644 ---- a/production/ksonnet/loki/images.libsonnet -+++ b/production/ksonnet/loki/images.libsonnet -@@ -4,7 +4,7 @@ - memcached: 'memcached:1.5.6-alpine', - memcachedExporter: 'prom/memcached-exporter:v0.4.1', - -- loki: 'grafana/loki:v0.2.0', -+ loki: 'grafana/loki:v0.3.0', - - distributor: self.loki, - ingester: self.loki, -diff --git a/production/ksonnet/promtail/config.libsonnet b/production/ksonnet/promtail/config.libsonnet -index dade4569733bc..01c91afc02ae1 100644 ---- a/production/ksonnet/promtail/config.libsonnet -+++ b/production/ksonnet/promtail/config.libsonnet -@@ -1,6 +1,6 @@ - { - _images+:: { -- promtail: 'grafana/promtail:v0.2.0', -+ promtail: 'grafana/promtail:v0.3.0', - }, - - _config+:: {",unknown,update helm and ksonnet to 0.3.0 -8cb57424e3238130edbb688328e27ad765239d9e,2021-10-25 23:06:26,JordanRushing,"Update CHANGELOG.md and /docs with info on new `overrides-exporter` module for Loki (#4540) - -* Update CHANGELOG.md and /docs with info on new overrides-exporter module for Loki - -* Apply small changes to `overrides-exporter` docs - -Co-authored-by: Owen Diehl - -Co-authored-by: Owen Diehl ",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index bd550d543d7f8..db2ae8c9b0dcb 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -7,6 +7,7 @@ - * [4473](https://github.com/grafana/loki/pull/4473) **trevorwhitney**: Config: add object storage configuration to common config - * [4425](https://github.com/grafana/loki/pull/4425) **trevorwhitney** and **slim-bean**: Add a ring for the query scheduler - * [4519](https://github.com/grafana/loki/pull/4519) **DylanGuedes** and **replay**: Loki: Enable FIFO cache by default -+* [4520](https://github.com/grafana/loki/pull/4520) **jordanrushing** and **owen-d**: Introduce overrides-exporter module for tenant limits - - # 2.3.0 (2021/08/06) - -diff --git a/docs/sources/operations/overrides-exporter.md b/docs/sources/operations/overrides-exporter.md -new file mode 100644 -index 0000000000000..2dba66507d60b ---- /dev/null -+++ b/docs/sources/operations/overrides-exporter.md -@@ -0,0 +1,78 @@ -+--- -+title: ""Overrides Exporter"" -+weight: 20 -+--- -+ -+Loki is a multi-tenant system that supports applying limits to each tenant as a mechanism for resource management. The `overrides-exporter` module exposes these limits as Prometheus metrics in order to help operators better understand tenant behavior. 
-+ -+## Context -+ -+Configuration updates to tenant limits can be applied to Loki without restart via the [`runtime_config`](../configuration/#runtime-configuration-file) feature. -+ -+## Example -+ -+The `overrides-exporter` module is disabled by default. We recommend running a single instance per cluster to avoid issues with metric cardinality as the `overrides-exporter` creates ~40 metrics per tenant with overrides configured. -+ -+Using an example `runtime.yaml`: -+ -+```yaml -+overrides: -+ ""tenant_1"": -+ ingestion_rate_mb: 10 -+ max_streams_per_user: 100000 -+ max_chunks_per_query: 100000 -+``` -+ -+Launch an instance of the `overrides-exporter`: -+ -+```shell -+loki -target=overrides-exporter -runtime-config.file=runtime.yaml -config.file=basic_schema_config.yaml -server.http-listen-port=8080 -+``` -+ -+To inspect the tenant limit overrides: -+ -+```shell -+$ curl -sq localhost:8080/metrics | grep override -+# HELP loki_overrides Resource limit overrides applied to tenants -+# TYPE loki_overrides gauge -+loki_overrides{limit_name=""cardinality_limit"",user=""user1""} 100000 -+loki_overrides{limit_name=""creation_grace_period"",user=""user1""} 6e+11 -+loki_overrides{limit_name=""ingestion_burst_size_mb"",user=""user1""} 350000 -+loki_overrides{limit_name=""ingestion_rate_mb"",user=""user1""} 10 -+loki_overrides{limit_name=""max_cache_freshness_per_query"",user=""user1""} 6e+10 -+loki_overrides{limit_name=""max_chunks_per_query"",user=""user1""} 100000 -+loki_overrides{limit_name=""max_concurrent_tail_requests"",user=""user1""} 10 -+loki_overrides{limit_name=""max_entries_limit_per_query"",user=""user1""} 5000 -+loki_overrides{limit_name=""max_global_streams_per_user"",user=""user1""} 5000 -+loki_overrides{limit_name=""max_label_name_length"",user=""user1""} 1024 -+loki_overrides{limit_name=""max_label_names_per_series"",user=""user1""} 30 -+loki_overrides{limit_name=""max_label_value_length"",user=""user1""} 2048 -+loki_overrides{limit_name=""max_line_size"",user=""user1""} 0 -+loki_overrides{limit_name=""max_queriers_per_tenant"",user=""user1""} 0 -+loki_overrides{limit_name=""max_query_length"",user=""user1""} 2.5956e+15 -+loki_overrides{limit_name=""max_query_lookback"",user=""user1""} 0 -+loki_overrides{limit_name=""max_query_parallelism"",user=""user1""} 32 -+loki_overrides{limit_name=""max_query_series"",user=""user1""} 1000 -+loki_overrides{limit_name=""max_streams_matchers_per_query"",user=""user1""} 1000 -+loki_overrides{limit_name=""max_streams_per_user"",user=""user1""} 100000 -+loki_overrides{limit_name=""min_sharding_lookback"",user=""user1""} 0 -+loki_overrides{limit_name=""per_stream_rate_limit"",user=""user1""} 3.145728e+06 -+loki_overrides{limit_name=""per_stream_rate_limit_burst"",user=""user1""} 1.572864e+07 -+loki_overrides{limit_name=""per_tenant_override_period"",user=""user1""} 1e+10 -+loki_overrides{limit_name=""reject_old_samples_max_age"",user=""user1""} 1.2096e+15 -+loki_overrides{limit_name=""retention_period"",user=""user1""} 2.6784e+15 -+loki_overrides{limit_name=""ruler_evaluation_delay_duration"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_max_rule_groups_per_tenant"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_max_rules_per_rule_group"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_remote_write_queue_batch_send_deadline"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_remote_write_queue_capacity"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_remote_write_queue_max_backoff"",user=""user1""} 0 
-+loki_overrides{limit_name=""ruler_remote_write_queue_max_samples_per_send"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_remote_write_queue_max_shards"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_remote_write_queue_min_backoff"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_remote_write_queue_min_shards"",user=""user1""} 0 -+loki_overrides{limit_name=""ruler_remote_write_timeout"",user=""user1""} 0 -+loki_overrides{limit_name=""split_queries_by_interval"",user=""user1""} 0 -+``` -+ -+Alerts can be created based on these metrics to inform operators when tenants are close to hitting their limits allowing for increases to be applied before the tenant limits are exceeded.",unknown,"Update CHANGELOG.md and /docs with info on new `overrides-exporter` module for Loki (#4540) - -* Update CHANGELOG.md and /docs with info on new overrides-exporter module for Loki - -* Apply small changes to `overrides-exporter` docs - -Co-authored-by: Owen Diehl - -Co-authored-by: Owen Diehl " -5b9de9472207945d1919af977ac44f7baabc111f,2023-10-04 20:46:07,ngc4579,"Helm: allow GrafanaAgent tolerations (#10613) - -**What this PR does / why we need it**: -Helm: Allow setting tolerations for GrafanaAgent pods in order to have -them scheduled to tainted nodes as well. - -**Which issue(s) this PR fixes**: -Fixes #10575 - -**Special notes for your reviewer**: -n/a - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [x] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [x] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - ---------- - -Co-authored-by: J Stickler -Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 0f0ea46e6e52b..13de3bc91f33f 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -6,6 +6,7 @@ - - ##### Enhancements - -+* [10613](https://github.com/grafana/loki/pull/10613) **ngc4579**: Helm: allow GrafanaAgent tolerations - * [10295](https://github.com/grafana/loki/pull/10295) **changhyuni**: Storage: remove signatureversionv2 from s3. - * [10140](https://github.com/grafana/loki/pull/10140) **dannykopping**: Dynamic client-side throttling to avoid object storage rate-limits (GCS only) - * [10302](https://github.com/grafana/loki/pull/10302) **ashwanthgoli**: Removes already deprecated `-querier.engine.timeout` CLI flag and corresponding YAML setting as well as the `querier.query_timeout` YAML setting. -diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md -index 9fb9e8282f605..74be975672570 100644 ---- a/docs/sources/setup/install/helm/reference.md -+++ b/docs/sources/setup/install/helm/reference.md -@@ -2682,6 +2682,15 @@ true -
- null
- 
-+ -+ -+ -+ monitoring.selfMonitoring.grafanaAgent.tolerations -+ list -+ Tolerations for GrafanaAgent pods -+
-+[]
-+
- - - -diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md -index 9c224f2a807f3..552e6b50085eb 100644 ---- a/production/helm/loki/CHANGELOG.md -+++ b/production/helm/loki/CHANGELOG.md -@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang - - [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) - -+## 5.24.0 -+ -+- [ENHANCEMENT] #10613 Allow tolerations for GrafanaAgent pods -+ - ## 5.23.1 - - - [BUGFIX] Add missing namespaces to some components -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index 89d1e2f6c8e00..a4a0abdb82912 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -3,7 +3,7 @@ name: loki - description: Helm chart for Grafana Loki in simple, scalable mode - type: application - appVersion: 2.9.1 --version: 5.23.1 -+version: 5.24.0 - home: https://grafana.github.io/helm-charts - sources: - - https://github.com/grafana/loki -diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md -index 002898dad582a..4794b6f98beb2 100644 ---- a/production/helm/loki/README.md -+++ b/production/helm/loki/README.md -@@ -1,6 +1,6 @@ - # loki - --![Version: 5.23.1](https://img.shields.io/badge/Version-5.23.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.1](https://img.shields.io/badge/AppVersion-2.9.1-informational?style=flat-square) -+![Version: 5.24.0](https://img.shields.io/badge/Version-5.24.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.1](https://img.shields.io/badge/AppVersion-2.9.1-informational?style=flat-square) - - Helm chart for Grafana Loki in simple, scalable mode - -diff --git a/production/helm/loki/templates/monitoring/grafana-agent.yaml b/production/helm/loki/templates/monitoring/grafana-agent.yaml -index e32a4adf7da36..c9723410285ab 100644 ---- a/production/helm/loki/templates/monitoring/grafana-agent.yaml -+++ b/production/helm/loki/templates/monitoring/grafana-agent.yaml -@@ -29,6 +29,10 @@ spec: - matchLabels: - {{- include ""loki.selectorLabels"" $ | nindent 8 }} - {{- end }} -+ {{- with .tolerations }} -+ tolerations: -+ {{- toYaml . | nindent 4 }} -+ {{- end }} - {{- end }} - - --- -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index a89c4c7578b2e..24120c6fadaf9 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -631,6 +631,8 @@ monitoring: - enableConfigReadAPI: false - # -- The name of the PriorityClass for GrafanaAgent pods - priorityClassName: null -+ # -- Tolerations for GrafanaAgent pods -+ tolerations: [] - # PodLogs configuration - podLogs: - # -- PodLogs annotations",Helm,"allow GrafanaAgent tolerations (#10613) - -**What this PR does / why we need it**: -Helm: Allow setting tolerations for GrafanaAgent pods in order to have -them scheduled to tainted nodes as well. 
- -**Which issue(s) this PR fixes**: -Fixes #10575 - -**Special notes for your reviewer**: -n/a - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [x] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [x] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - ---------- - -Co-authored-by: J Stickler -Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>" -5fd5e06cfc66bcaa35ce9f7009ec585efda759b7,2024-03-08 00:14:58,Robert Jacob,fix(operator): Update Go builder for size-calculator (#12161),False,"diff --git a/operator/calculator.Dockerfile b/operator/calculator.Dockerfile -index e12d7ad490088..be7324ce33df3 100644 ---- a/operator/calculator.Dockerfile -+++ b/operator/calculator.Dockerfile -@@ -1,5 +1,5 @@ - # Build the calculator binary --FROM golang:1.20.6 as builder -+FROM golang:1.21.7 as builder - - WORKDIR /workspace - # Copy the Go Modules manifests",fix,Update Go builder for size-calculator (#12161) -375fc86af7595e2ec83d46d5482172c1bb69abf0,2020-02-05 20:25:00,Cyril Tovena,"Fixes stats summary computation. (#1636) - -* Fixes stats summary computation. - -Signed-off-by: Cyril Tovena - -* Fixes division by zero - -Signed-off-by: Cyril Tovena ",False,"diff --git a/pkg/logql/stats/context.go b/pkg/logql/stats/context.go -index de5aaf6bbe209..9371297ac5121 100644 ---- a/pkg/logql/stats/context.go -+++ b/pkg/logql/stats/context.go -@@ -153,31 +153,29 @@ func Snapshot(ctx context.Context, execTime time.Duration) Result { - res.Store.CompressedBytes = c.CompressedBytes - res.Store.TotalDuplicates = c.TotalDuplicates - } -+ res.ComputeSummary(execTime) -+ return res -+} - -+// ComputeSummary calculates the summary based on store and ingester data. 
-+func (r *Result) ComputeSummary(execTime time.Duration) { - // calculate the summary -- res.Summary.TotalBytesProcessed = res.Store.DecompressedBytes + res.Store.HeadChunkBytes + -- res.Ingester.DecompressedBytes + res.Ingester.HeadChunkBytes -- res.Summary.BytesProcessedPerSeconds = -- int64(float64(res.Summary.TotalBytesProcessed) / -- execTime.Seconds()) -- res.Summary.TotalLinesProcessed = res.Store.DecompressedLines + res.Store.HeadChunkLines + -- res.Ingester.DecompressedLines + res.Ingester.HeadChunkLines -- res.Summary.LinesProcessedPerSeconds = -- int64(float64(res.Summary.TotalLinesProcessed) / -- execTime.Seconds()) -- res.Summary.ExecTime = execTime.Seconds() -- return res -+ r.Summary.TotalBytesProcessed = r.Store.DecompressedBytes + r.Store.HeadChunkBytes + -+ r.Ingester.DecompressedBytes + r.Ingester.HeadChunkBytes -+ r.Summary.TotalLinesProcessed = r.Store.DecompressedLines + r.Store.HeadChunkLines + -+ r.Ingester.DecompressedLines + r.Ingester.HeadChunkLines -+ r.Summary.ExecTime = execTime.Seconds() -+ if execTime != 0 { -+ r.Summary.BytesProcessedPerSeconds = -+ int64(float64(r.Summary.TotalBytesProcessed) / -+ execTime.Seconds()) -+ r.Summary.LinesProcessedPerSeconds = -+ int64(float64(r.Summary.TotalLinesProcessed) / -+ execTime.Seconds()) -+ } - } - - func (r *Result) Merge(m Result) { -- if r == nil { -- return -- } -- r.Summary.BytesProcessedPerSeconds += m.Summary.BytesProcessedPerSeconds -- r.Summary.LinesProcessedPerSeconds += m.Summary.LinesProcessedPerSeconds -- r.Summary.TotalBytesProcessed += m.Summary.TotalBytesProcessed -- r.Summary.TotalLinesProcessed += m.Summary.TotalLinesProcessed -- r.Summary.ExecTime += m.Summary.ExecTime - - r.Store.TotalChunksRef += m.Store.TotalChunksRef - r.Store.TotalChunksDownloaded += m.Store.TotalChunksDownloaded -@@ -199,4 +197,6 @@ func (r *Result) Merge(m Result) { - r.Ingester.DecompressedLines += m.Ingester.DecompressedLines - r.Ingester.CompressedBytes += m.Ingester.CompressedBytes - r.Ingester.TotalDuplicates += m.Ingester.TotalDuplicates -+ -+ r.ComputeSummary(time.Duration(int64((r.Summary.ExecTime + m.Summary.ExecTime) * float64(time.Second)))) - } -diff --git a/pkg/logql/stats/context_test.go b/pkg/logql/stats/context_test.go -index 5873b3d77b204..b160544261f96 100644 ---- a/pkg/logql/stats/context_test.go -+++ b/pkg/logql/stats/context_test.go -@@ -88,6 +88,9 @@ func fakeIngesterQuery(ctx context.Context) { - func TestResult_Merge(t *testing.T) { - var res Result - -+ res.Merge(res) // testing zero. 
-+ require.Equal(t, res, res) -+ - toMerge := Result{ - Ingester: Ingester{ - TotalChunksMatched: 200, -@@ -123,4 +126,40 @@ func TestResult_Merge(t *testing.T) { - - res.Merge(toMerge) - require.Equal(t, toMerge, res) -+ -+ // merge again -+ res.Merge(toMerge) -+ require.Equal(t, Result{ -+ Ingester: Ingester{ -+ TotalChunksMatched: 2 * 200, -+ TotalBatches: 2 * 50, -+ TotalLinesSent: 2 * 60, -+ HeadChunkBytes: 2 * 10, -+ HeadChunkLines: 2 * 20, -+ DecompressedBytes: 2 * 24, -+ DecompressedLines: 2 * 40, -+ CompressedBytes: 2 * 60, -+ TotalDuplicates: 2 * 2, -+ TotalReached: 2 * 2, -+ }, -+ Store: Store{ -+ TotalChunksRef: 2 * 50, -+ TotalChunksDownloaded: 2 * 60, -+ ChunksDownloadTime: 2 * time.Second.Seconds(), -+ HeadChunkBytes: 2 * 10, -+ HeadChunkLines: 2 * 20, -+ DecompressedBytes: 2 * 40, -+ DecompressedLines: 2 * 20, -+ CompressedBytes: 2 * 30, -+ TotalDuplicates: 2 * 10, -+ }, -+ Summary: Summary{ -+ ExecTime: 2 * 2 * time.Second.Seconds(), -+ BytesProcessedPerSeconds: int64(42), // 2 requests at the same pace should give the same bytes/lines per sec -+ LinesProcessedPerSeconds: int64(50), -+ TotalBytesProcessed: 2 * int64(84), -+ TotalLinesProcessed: 2 * int64(100), -+ }, -+ }, res) -+ - }",unknown,"Fixes stats summary computation. (#1636) - -* Fixes stats summary computation. - -Signed-off-by: Cyril Tovena - -* Fixes division by zero - -Signed-off-by: Cyril Tovena " -72902af4738b8abc6c74af2dbb119402e4c432a9,2020-02-11 03:57:10,Oscar Santiago,"Correcte syntax of rate example (#1641) - -* Correcte syntax of rate example - -* Update docs/logql.md - -Co-Authored-By: Cyril Tovena - -Co-authored-by: Cyril Tovena ",False,"diff --git a/docs/logql.md b/docs/logql.md -index ba5d2b2980e7d..9fb70a0c1a266 100644 ---- a/docs/logql.md -+++ b/docs/logql.md -@@ -97,7 +97,7 @@ The currently supported functions for operating over are: - This example counts all the log lines within the last five minutes for the - MySQL job. - --> `rate( ( {job=""mysql""} |= ""error"" != ""timeout)[10s] ) )` -+> `rate({job=""mysql""} |= ""error"" != ""timeout"" [10s] )` - - This example demonstrates that a fully LogQL query can be wrapped in the - aggregation syntax, including filter expressions. This example gets the",unknown,"Correcte syntax of rate example (#1641) - -* Correcte syntax of rate example - -* Update docs/logql.md - -Co-Authored-By: Cyril Tovena - -Co-authored-by: Cyril Tovena " -061bdfe67208eef0af11b0a31214f2470088b585,2024-10-31 20:52:00,Jay Clifford,docs: AWS Cloud Guide Update (#14687),False,"diff --git a/docs/sources/setup/install/helm/deployment-guides/aws.md b/docs/sources/setup/install/helm/deployment-guides/aws.md -index bbe80da5e7761..380a37dd4ff6c 100644 ---- a/docs/sources/setup/install/helm/deployment-guides/aws.md -+++ b/docs/sources/setup/install/helm/deployment-guides/aws.md -@@ -18,12 +18,12 @@ There are two methods for authenticating and connecting Loki to AWS S3. We will - ## Considerations - - {{< admonition type=""caution"" >}} --This guide was accurate at the time it was last updated on **21st October, 2024**. As cloud providers frequently update their services and offerings, as a best practice, you should refer to the [AWS S3 documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html) before creating your buckets and assigning roles. -+This guide was accurate at the time it was last updated on **31st October, 2024**. 
As cloud providers frequently update their services and offerings, as a best practice, you should refer to the [AWS S3 documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html) before creating your buckets and assigning roles. - {{< /admonition >}} - - - **IAM Role:** The IAM role created in this guide is a basic role that allows Loki to read and write to the S3 bucket. You may wish to add more granular permissions based on your requirements. - --- **Authentication:** Grafana Loki comes with a basic authentication layer. The Loki gateway (NGINX) is exposed to the internet using basic authentication in this example. NGINX can also be replaced with other open-source reverse proxies. Refer to [Authentication](https://grafana.com/docs/loki//operations/authentication/) for more information. - - - **Retention:** The retention period is set to 28 days in the `values.yaml` file. You may wish to adjust this based on your requirements. - -@@ -48,7 +48,7 @@ The minimum requirements for deploying Loki on EKS are: - - - Kubernetes version `1.30` or above. - - `3` nodes for the EKS cluster. --- Instance type depends on your workload. A good starting point is `m5.xlarge`. -+- Instance type depends on your workload. A good starting point for a production cluster is `m7i.2xlarge`. - - Here is the EKSctl cluster configuration file used in this guide: - -@@ -59,8 +59,8 @@ apiVersion: eksctl.io/v1alpha5 - kind: ClusterConfig - - metadata: -- name: -- region: -+ name: -+ region: - version: ""1.31"" - - iam: -@@ -68,28 +68,25 @@ iam: - - addons: - - name: aws-ebs-csi-driver -- - name: eks-pod-identity-agent - - managedNodeGroups: - - name: loki-workers -- instanceType: m5.xlarge -+ instanceType: m7i.2xlarge - desiredCapacity: 3 - minSize: 2 - maxSize: 3 -- amiFamily: AmazonLinux2 -+ amiFamily: AmazonLinux2023 - iam: - withAddonPolicies: - ebs: true - volumeSize: 80 -- volumeType: gp2 -+ volumeType: gp3 - ebsOptimized: true -- - ``` - - - The following plugins must also be installed within the EKS cluster: - - **Amazon EBS CSI Driver**: Enables Kubernetes to dynamically provision and manage EBS volumes as persistent storage for applications. We use this to provision the node volumes for Loki. --- **Amazon EKS Pod Identity Agent**: Manages AWS IAM roles for pods, allowing fine-grained access control to AWS resources without needing to store credentials in containers. This is how Loki will access the S3 bucket. - - **CoreDNS**: Provides internal DNS service for Kubernetes clusters, ensuring that services and pods can communicate with each other using DNS names. - - **kube-proxy**: Maintains network rules on nodes, enabling communication between pods and services within the cluster. - -@@ -198,77 +195,6 @@ The recommended method for connecting Loki to AWS S3 is to use an IAM role. This - ``` - **Make sure to replace the placeholder with your AWS account ID.** - --### Adding the policy to the S3 buckets -- --To allow the IAM role to access the S3 buckets, you need to add the policy to the bucket. You can do this using the AWS Management Console or the AWS CLI. The below steps show how to add the policy using the AWS CLI. -- --1. 
Create a bucket policy file named `bucket-policy-chunk.json` with the following content: -- -- ```json -- { -- ""Version"": ""2012-10-17"", -- ""Statement"": [ -- { -- ""Sid"": ""Statement1"", -- ""Effect"": ""Allow"", -- ""Principal"": { -- ""AWS"": ""arn:aws:iam:::role/LokiServiceAccountRole"" -- }, -- ""Action"": [ -- ""s3:PutObject"", -- ""s3:GetObject"", -- ""s3:DeleteObject"", -- ""s3:ListBucket"" -- ], -- ""Resource"": [ -- ""arn:aws:s3:::< CHUNK BUCKET NAME >"", -- ""arn:aws:s3:::< CHUNK BUCKET NAME >/*"" -- ] -- } -- ] -- } -- ``` -- **Make sure to replace the placeholders with your AWS account ID and the bucket names.** -- --1. Add the policy to the bucket: -- -- ```bash -- aws s3api put-bucket-policy --bucket --policy file://bucket-policy-chunk.json -- ``` --1. Create a bucket policy file named `bucket-policy-ruler.json` with the following content: -- -- ```json -- { -- ""Version"": ""2012-10-17"", -- ""Statement"": [ -- { -- ""Sid"": ""Statement1"", -- ""Effect"": ""Allow"", -- ""Principal"": { -- ""AWS"": ""arn:aws:iam:::role/LokiServiceAccountRole"" -- }, -- ""Action"": [ -- ""s3:PutObject"", -- ""s3:GetObject"", -- ""s3:DeleteObject"", -- ""s3:ListBucket"" -- ], -- ""Resource"": [ -- ""arn:aws:s3:::< RULER BUCKET NAME >"", -- ""arn:aws:s3:::< RULER BUCKET NAME >/*"" -- ] -- } -- ] -- } -- ``` -- **Make sure to replace the placeholders with your AWS account ID and the bucket names.** -- --1. Add the policy to the bucket: -- -- ```bash -- aws s3api put-bucket-policy --bucket --policy file://bucket-policy-ruler.json -- ``` -- - ## Deploying the Helm chart - - Before we can deploy the Loki Helm chart, we need to add the Grafana chart repository to Helm. This repository contains the Loki Helm chart. -@@ -322,8 +248,6 @@ Loki by default does not come with any authentication. Since we will be deployin - We create a literal secret with the username and password for Loki canary to authenticate with the Loki gateway. - **Make sure to replace the placeholders with your desired username and password.** - -- -- - ### Loki Helm chart configuration - - Create a `values.yaml` file choosing the configuration options that best suit your requirements. Below there is an example of `values.yaml` files for the Loki Helm chart in [microservices](https://grafana.com/docs/loki//get-started/deployment-modes/#microservices-mode) mode.",docs,AWS Cloud Guide Update (#14687) -96623440c96e291d3e059b444ca0a43a7d2f6ac8,2022-09-09 03:24:36,Old Schepperhand,"docs: fix alertmanager_client config settings (#6858) - -when setting -``` -basic_auth: - username: xxx - password: yyy -``` -following error gets thrown: -``` -failed parsing config: /etc/loki/loki-config.yaml: yaml: unmarshal errors: - line 123: field basic_auth not found in type base.NotifierConfig -``` - - - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - - -**Checklist** -- [ ] Documentation added -- [ ] Tests updated -- [ ] Is this an important fix or new feature? Add an entry in the -`CHANGELOG.md`. 
-- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`",False,"diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md -index 20ce2c5730781..47b0841b95f33 100644 ---- a/docs/sources/configuration/_index.md -+++ b/docs/sources/configuration/_index.md -@@ -709,9 +709,8 @@ alertmanager_client: - # Sets the `Authorization` header on every remote write request with the - # configured username and password. - # password and password_file are mutually exclusive. -- basic_auth: -- [username: ] -- [password: ] -+ [basic_auth_username: ] -+ [basic_auth_password: ] - - # Optional `Authorization` header configuration. - authorization:",docs,"fix alertmanager_client config settings (#6858) - -when setting -``` -basic_auth: - username: xxx - password: yyy -``` -following error gets thrown: -``` -failed parsing config: /etc/loki/loki-config.yaml: yaml: unmarshal errors: - line 123: field basic_auth not found in type base.NotifierConfig -``` - - - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - - -**Checklist** -- [ ] Documentation added -- [ ] Tests updated -- [ ] Is this an important fix or new feature? Add an entry in the -`CHANGELOG.md`. -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`" -4c4c7e3010fae0ae51c9931fd9bd493b06576bd2,2023-03-27 13:27:25,René Scheibe,"Promtail: Fix examples how to build it (#8898) - -The build flags have to be provided before the package paths. - -Otherwise the `build` command fails with this error: -> malformed import path ""--tags=promtail_journal_enabled"": leading dash - -See `go help build`: -> usage: go build [-o output] [build flags] [packages]",False,"diff --git a/README.md b/README.md -index 45af4ec8dc27b..d15040ccc4af0 100644 ---- a/README.md -+++ b/README.md -@@ -120,14 +120,14 @@ With Journal support on Ubuntu, run with the following commands: - - ```bash - $ sudo apt install -y libsystemd-dev --$ go build ./clients/cmd/promtail --tags=promtail_journal_enabled -+$ go build --tags=promtail_journal_enabled ./clients/cmd/promtail - ``` - - With Journal support on CentOS, run with the following commands: - - ```bash - $ sudo yum install -y systemd-devel --$ go build ./clients/cmd/promtail --tags=promtail_journal_enabled -+$ go build --tags=promtail_journal_enabled ./clients/cmd/promtail - ``` - - Otherwise, to build Promtail without Journal support, run `go build` -diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md -index a7bd7ca78f121..507db423cf704 100644 ---- a/docs/sources/upgrading/_index.md -+++ b/docs/sources/upgrading/_index.md -@@ -85,7 +85,7 @@ The go build tag `promtail_journal_enabled` should be passed to include Journal - If you need Journal support you will need to run go build with tag `promtail_journal_enabled`: - - ```shell --go build ./clients/cmd/promtail --tags=promtail_journal_enabled -+go build --tags=promtail_journal_enabled ./clients/cmd/promtail - ``` - Introducing this tag aims to relieve Linux/CentOS users with CGO enabled from installing libsystemd-dev/systemd-devel libraries if they don't need Journal support.",Promtail,"Fix examples how to build it (#8898) - -The build flags have to be provided before the package paths. 
- -Otherwise the `build` command fails with this error: -> malformed import path ""--tags=promtail_journal_enabled"": leading dash - -See `go help build`: -> usage: go build [-o output] [build flags] [packages]" -b1ea8ac050e5c2f740d82b7528204f1cc6295740,2021-03-16 13:43:48,Aditya C S,"support math functions in line_format and label_format (#3434) - -* support math functions in line_format and label_format - -* fix lint and tests - -* added docs and more tests - -* update doc - -* remove toString and int64 - -* doc adjustement. - -Signed-off-by: Cyril Tovena - -Co-authored-by: Cyril Tovena ",False,"diff --git a/docs/sources/logql/_index.md b/docs/sources/logql/_index.md -index cc4db8580a343..ea13b2fe585c7 100644 ---- a/docs/sources/logql/_index.md -+++ b/docs/sources/logql/_index.md -@@ -382,11 +382,21 @@ Will extract and rewrite the log line to only contains the query and the duratio - - You can use double quoted string for the template or backticks `` `{{.label_name}}` `` to avoid the need to escape special characters. - -+`line_format` also supports `math` functions. Example: -+ -+If we have fllowing labels `ip=1.1.1.1`, `status=200` and `duration=3000`(ms). We can divide the duration by `1000` to get the value in seconds. -+ -+```logql -+{container=""frontend""} | logfmt | line_format ""{{.ip}} {{.status}} {{div .duration 1000}}"" -+``` -+ -+The above query will give us the `line` as `1.1.1.1 200 3` -+ - See [template functions](template_functions/) to learn about available functions in the template format. - - #### Labels Format Expression - --The `| label_format` expression can renamed, modify or add labels. It takes as parameter a comma separated list of equality operations, enabling multiple operations at once. -+The `| label_format` expression can rename, modify or add labels. It takes as parameter a comma separated list of equality operations, enabling multiple operations at once. - - When both side are label identifiers, for example `dst=src`, the operation will rename the `src` label into `dst`. - -diff --git a/docs/sources/logql/template_functions.md b/docs/sources/logql/template_functions.md -index fcf77e321b01f..e28827842174d 100644 ---- a/docs/sources/logql/template_functions.md -+++ b/docs/sources/logql/template_functions.md -@@ -371,3 +371,227 @@ Examples: - {{ if .err hasSuffix ""Timeout"" }} timeout {{end}} - {{ if hasPrefix ""he"" ""hello"" }} yes {{end}} - ``` -+ -+## add -+ -+> **Note:** Added in Loki 2.3. -+ -+Sum numbers. Supports multiple numbers -+ -+Signature: `func(i ...interface{}) int64` -+ -+```template -+{{ add 3 2 5 }} // output: 10 -+``` -+ -+## sub -+ -+> **Note:** Added in Loki 2.3. -+ -+Subtract numbers. -+ -+Signature: `func(a, b interface{}) int64` -+ -+```template -+{{ sub 5 2 }} // output: 3 -+``` -+ -+## mul -+ -+> **Note:** Added in Loki 2.3. -+ -+Mulitply numbers. Supports multiple numbers. -+ -+Signature: `func(a interface{}, v ...interface{}) int64` -+ -+```template -+{{ mul 5 2 3}} // output: 30 -+``` -+ -+## div -+ -+> **Note:** Added in Loki 2.3. -+ -+Integer divide numbers. -+ -+Signature: `func(a, b interface{}) int64` -+ -+```template -+{{ div 10 2}} // output: 5 -+``` -+ -+## addf -+ -+> **Note:** Added in Loki 2.3. -+ -+Sum numbers. Supports multiple numbers. -+ -+Signature: `func(i ...interface{}) float64` -+ -+```template -+{{ addf 3.5 2 5 }} // output: 10.5 -+``` -+ -+## subf -+ -+> **Note:** Added in Loki 2.3. -+ -+Subtract numbers. Supports multiple numbers. 
-+ -+Signature: `func(a interface{}, v ...interface{}) float64` -+ -+```template -+{{ subf 5.5 2 1.5 }} // output: 2 -+``` -+ -+## mulf -+ -+> **Note:** Added in Loki 2.3. -+ -+Mulitply numbers. Supports multiple numbers -+ -+Signature: `func(a interface{}, v ...interface{}) float64` -+ -+```template -+{{ mulf 5.5 2 2.5 }} // output: 27.5 -+``` -+ -+## divf -+ -+> **Note:** Added in Loki 2.3. -+ -+Divide numbers. Supports multiple numbers. -+ -+Signature: `func(a interface{}, v ...interface{}) float64` -+ -+```template -+{{ divf 10 2 4}} // output: 1.25 -+``` -+ -+## mod -+ -+> **Note:** Added in Loki 2.3. -+ -+Modulo wit mod. -+ -+Signature: `func(a, b interface{}) int64` -+ -+```template -+{{ mod 10 3}} // output: 1 -+``` -+ -+## max -+ -+> **Note:** Added in Loki 2.3. -+ -+Return the largest of a series of integers: -+ -+Signature: `max(a interface{}, i ...interface{}) int64` -+ -+```template -+{{ max 1 2 3 }} //output 3 -+``` -+ -+## min -+ -+> **Note:** Added in Loki 2.3. -+ -+Return the smallest of a series of integers. -+ -+Signature: `min(a interface{}, i ...interface{}) int64` -+ -+```template -+{{ max 1 2 3 }} //output 1 -+``` -+ -+## maxf -+ -+> **Note:** Added in Loki 2.3. -+ -+Return the largest of a series of floats: -+ -+Signature: `maxf(a interface{}, i ...interface{}) float64` -+ -+```template -+{{ maxf 1 2.5 3 }} //output 3 -+``` -+ -+## minf -+ -+> **Note:** Added in Loki 2.3. -+ -+Return the smallest of a series of floats. -+ -+Signature: `minf(a interface{}, i ...interface{}) float64` -+ -+```template -+{{ minf 1 2.5 3 }} //output 1.5 -+``` -+ -+## ceil -+ -+> **Note:** Added in Loki 2.3. -+ -+Returns the greatest float value greater than or equal to input value -+ -+Signature: `ceil(a interface{}) float64` -+ -+```template -+{{ ceil 123.001 }} //output 124.0 -+``` -+ -+## floor -+ -+> **Note:** Added in Loki 2.3. -+ -+Returns the greatest float value less than or equal to input value -+ -+Signature: `floor(a interface{}) float64` -+ -+```template -+{{ floor 123.9999 }} //output 123.0 -+``` -+ -+## round -+ -+> **Note:** Added in Loki 2.3. -+ -+Returns a float value with the remainder rounded to the given number of digits after the decimal point. -+ -+Signature: `round(a interface{}, p int, rOpt ...float64) float64` -+ -+```template -+{{ round 123.555555 3 }} //output 123.556 -+``` -+ -+We can also provide a `roundOn` number as third parameter -+ -+```template -+{{ round 123.88571428571 5 .2 }} //output 123.88572 -+``` -+ -+With default `roundOn` of `.5` the above value would be `123.88571` -+ -+## int -+ -+> **Note:** Added in Loki 2.3. -+ -+Convert value to an int. -+ -+Signature: `toInt(v interface{}) int` -+ -+```template -+{{ ""3"" | int }} //output 3 -+``` -+ -+## float64 -+ -+> **Note:** Added in Loki 2.3. -+ -+Convert to a float64. 
-+ -+Signature: `toFloat64(v interface{}) float64` -+ -+```template -+{{ ""3.5"" | float64 }} //output 3.5 -+``` -diff --git a/go.mod b/go.mod -index 7540f3095c167..e9a62b68a11c4 100644 ---- a/go.mod -+++ b/go.mod -@@ -4,6 +4,7 @@ go 1.15 - - require ( - cloud.google.com/go/pubsub v1.3.1 -+ github.com/Masterminds/sprig/v3 v3.2.2 - github.com/NYTimes/gziphandler v1.1.1 - github.com/aws/aws-lambda-go v1.17.0 - github.com/bmatcuk/doublestar v1.2.2 -@@ -33,7 +34,7 @@ require ( - github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 - github.com/hashicorp/golang-lru v0.5.4 - github.com/hpcloud/tail v1.0.0 -- github.com/imdario/mergo v0.3.9 -+ github.com/imdario/mergo v0.3.11 - github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48 - github.com/influxdata/telegraf v1.16.3 - github.com/jmespath/go-jmespath v0.4.0 -diff --git a/go.sum b/go.sum -index 0b3b69a633122..ea622e6a58202 100644 ---- a/go.sum -+++ b/go.sum -@@ -144,6 +144,12 @@ github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bE - github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= - github.com/Jeffail/gabs v1.1.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= - github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -+github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -+github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -+github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= - github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 h1:PPfYWScYacO3Q6JMCLkyh6Ea2Q/REDTMgmiTAeiV8Jg= - github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= - github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= -@@ -926,13 +932,15 @@ github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwI - github.com/hetznercloud/hcloud-go v1.23.1 h1:SkYdCa6x458cMSDz5GI18iPz5j2hicACiDP6J/s/bTs= - github.com/hetznercloud/hcloud-go v1.23.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= - github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= -+github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= -+github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= - github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= - github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= - github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= - github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= - github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= --github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= --github.com/imdario/mergo v0.3.9/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= - github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= - github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= - github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -@@ -1149,6 +1157,8 @@ github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl - github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= - github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= - github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY= -+github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= - github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= - github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= - github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -@@ -1164,6 +1174,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh - github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= - github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= - github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -+github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= - github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= - github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= - github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= -@@ -1449,6 +1461,8 @@ github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu - github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= - github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= - github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= - github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= - github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= - github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -@@ -1496,6 +1510,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B - github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= - github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= - 
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= - github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= - github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= - github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -@@ -1707,6 +1723,7 @@ golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPh - golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -+golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -diff --git a/pkg/logql/log/fmt.go b/pkg/logql/log/fmt.go -index 270df15d68e55..3bebc203c2917 100644 ---- a/pkg/logql/log/fmt.go -+++ b/pkg/logql/log/fmt.go -@@ -7,6 +7,8 @@ import ( - ""strings"" - ""text/template"" - ""text/template/parse"" -+ -+ ""github.com/Masterminds/sprig/v3"" - ) - - var ( -@@ -15,7 +17,7 @@ var ( - - // Available map of functions for the text template engine. - functionMap = template.FuncMap{ -- // olds function deprecated. -+ // olds functions deprecated. - ""ToLower"": strings.ToLower, - ""ToUpper"": strings.ToUpper, - ""Replace"": strings.Replace, -@@ -25,28 +27,6 @@ var ( - ""TrimPrefix"": strings.TrimPrefix, - ""TrimSuffix"": strings.TrimSuffix, - ""TrimSpace"": strings.TrimSpace, -- -- // New function ported from https://github.com/Masterminds/sprig/ -- // Those function takes the string as the last parameter, allowing pipe chaining. 
-- // Example: .mylabel | lower | substring 0 5 -- ""lower"": strings.ToLower, -- ""upper"": strings.ToUpper, -- ""title"": strings.Title, -- ""trunc"": trunc, -- ""substr"": substring, -- ""contains"": contains, -- ""hasPrefix"": hasPrefix, -- ""hasSuffix"": hasSuffix, -- ""indent"": indent, -- ""nindent"": nindent, -- ""replace"": replace, -- ""repeat"": repeat, -- ""trim"": strings.TrimSpace, -- ""trimAll"": trimAll, -- ""trimSuffix"": trimSuffix, -- ""trimPrefix"": trimPrefix, -- -- // regex functions - ""regexReplaceAll"": func(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -@@ -56,6 +36,44 @@ var ( - return r.ReplaceAllLiteralString(s, repl) - }, - } -+ -+ // sprig template functions -+ templateFunctions = []string{ -+ ""lower"", -+ ""upper"", -+ ""title"", -+ ""trunc"", -+ ""substr"", -+ ""contains"", -+ ""hasPrefix"", -+ ""hasSuffix"", -+ ""indent"", -+ ""nindent"", -+ ""replace"", -+ ""repeat"", -+ ""trim"", -+ ""trimAll"", -+ ""trimSuffix"", -+ ""trimPrefix"", -+ ""int"", -+ ""float64"", -+ ""add"", -+ ""sub"", -+ ""mul"", -+ ""div"", -+ ""mod"", -+ ""addf"", -+ ""subf"", -+ ""mulf"", -+ ""divf"", -+ ""max"", -+ ""min"", -+ ""maxf"", -+ ""minf"", -+ ""ceil"", -+ ""floor"", -+ ""round"", -+ } - ) - - type LineFormatter struct { -@@ -65,7 +83,7 @@ type LineFormatter struct { - - // NewFormatter creates a new log line formatter from a given text template. - func NewFormatter(tmpl string) (*LineFormatter, error) { -- t, err := template.New(""line"").Option(""missingkey=zero"").Funcs(functionMap).Parse(tmpl) -+ t, err := template.New(""line"").Option(""missingkey=zero"").Funcs(templateFunctionMap()).Parse(tmpl) - if err != nil { - return nil, fmt.Errorf(""invalid line template: %s"", err) - } -@@ -189,7 +207,7 @@ func NewLabelsFormatter(fmts []LabelFmt) (*LabelsFormatter, error) { - for _, fm := range fmts { - toAdd := labelFormatter{LabelFmt: fm} - if !fm.Rename { -- t, err := template.New(""label"").Option(""missingkey=zero"").Funcs(functionMap).Parse(fm.Value) -+ t, err := template.New(""label"").Option(""missingkey=zero"").Funcs(templateFunctionMap()).Parse(fm.Value) - if err != nil { - return nil, fmt.Errorf(""invalid template for label '%s': %s"", fm.Name, err) - } -@@ -298,17 +316,13 @@ func substring(start, end int, s string) string { - return string(runes[start:end]) - } - --func contains(substr string, str string) bool { return strings.Contains(str, substr) } --func hasPrefix(substr string, str string) bool { return strings.HasPrefix(str, substr) } --func hasSuffix(substr string, str string) bool { return strings.HasSuffix(str, substr) } --func repeat(count int, str string) string { return strings.Repeat(str, count) } --func replace(old, new, src string) string { return strings.Replace(src, old, new, -1) } --func trimAll(a, b string) string { return strings.Trim(b, a) } --func trimSuffix(a, b string) string { return strings.TrimSuffix(b, a) } --func trimPrefix(a, b string) string { return strings.TrimPrefix(b, a) } --func indent(spaces int, v string) string { -- pad := strings.Repeat("" "", spaces) -- return pad + strings.Replace(v, ""\n"", ""\n""+pad, -1) -+// add sprig template functions maps -+func templateFunctionMap() map[string]interface{} { -+ sprigFuncMap := sprig.GenericFuncMap() -+ for _, v := range templateFunctions { -+ if function, ok := sprigFuncMap[v]; ok { -+ functionMap[v] = function -+ } -+ } -+ return functionMap - } -- --func nindent(spaces int, v string) string { return ""\n"" + 
indent(spaces, v) } -diff --git a/pkg/logql/log/fmt_test.go b/pkg/logql/log/fmt_test.go -index 8903973e65c01..9bb18b31fef37 100644 ---- a/pkg/logql/log/fmt_test.go -+++ b/pkg/logql/log/fmt_test.go -@@ -148,6 +148,62 @@ func Test_lineFormatter_Format(t *testing.T) { - []byte(""foo BLIP buzzblop""), - labels.Labels{{Name: ""foo"", Value: ""blip""}, {Name: ""bar"", Value: ""blop""}}, - }, -+ { -+ ""mathint"", -+ newMustLineFormatter(""{{ add .foo 1 | sub .bar | mul .baz | div .bazz}}""), -+ labels.Labels{{Name: ""foo"", Value: ""1""}, {Name: ""bar"", Value: ""3""}, {Name: ""baz"", Value: ""10""}, {Name: ""bazz"", Value: ""20""}}, -+ []byte(""2""), -+ labels.Labels{{Name: ""foo"", Value: ""1""}, {Name: ""bar"", Value: ""3""}, {Name: ""baz"", Value: ""10""}, {Name: ""bazz"", Value: ""20""}}, -+ }, -+ { -+ ""mathfloat"", -+ newMustLineFormatter(""{{ addf .foo 1.5 | subf .bar 1.5 | mulf .baz | divf .bazz }}""), -+ labels.Labels{{Name: ""foo"", Value: ""1.5""}, {Name: ""bar"", Value: ""5""}, {Name: ""baz"", Value: ""10.5""}, {Name: ""bazz"", Value: ""20.2""}}, -+ []byte(""3.8476190476190477""), -+ labels.Labels{{Name: ""foo"", Value: ""1.5""}, {Name: ""bar"", Value: ""5""}, {Name: ""baz"", Value: ""10.5""}, {Name: ""bazz"", Value: ""20.2""}}, -+ }, -+ { -+ ""mathfloatround"", -+ newMustLineFormatter(""{{ round (addf .foo 1.5 | subf .bar | mulf .baz | divf .bazz) 5 .2}}""), -+ labels.Labels{{Name: ""foo"", Value: ""1.5""}, {Name: ""bar"", Value: ""3.5""}, {Name: ""baz"", Value: ""10.5""}, {Name: ""bazz"", Value: ""20.4""}}, -+ []byte(""3.88572""), -+ labels.Labels{{Name: ""foo"", Value: ""1.5""}, {Name: ""bar"", Value: ""3.5""}, {Name: ""baz"", Value: ""10.5""}, {Name: ""bazz"", Value: ""20.4""}}, -+ }, -+ { -+ ""min"", -+ newMustLineFormatter(""min is {{ min .foo .bar .baz }} and max is {{ max .foo .bar .baz }}""), -+ labels.Labels{{Name: ""foo"", Value: ""5""}, {Name: ""bar"", Value: ""10""}, {Name: ""baz"", Value: ""15""}}, -+ []byte(""min is 5 and max is 15""), -+ labels.Labels{{Name: ""foo"", Value: ""5""}, {Name: ""bar"", Value: ""10""}, {Name: ""baz"", Value: ""15""}}, -+ }, -+ { -+ ""max"", -+ newMustLineFormatter(""minf is {{ minf .foo .bar .baz }} and maxf is {{maxf .foo .bar .baz}}""), -+ labels.Labels{{Name: ""foo"", Value: ""5.3""}, {Name: ""bar"", Value: ""10.5""}, {Name: ""baz"", Value: ""15.2""}}, -+ []byte(""minf is 5.3 and maxf is 15.2""), -+ labels.Labels{{Name: ""foo"", Value: ""5.3""}, {Name: ""bar"", Value: ""10.5""}, {Name: ""baz"", Value: ""15.2""}}, -+ }, -+ { -+ ""ceilfloor"", -+ newMustLineFormatter(""ceil is {{ ceil .foo }} and floor is {{floor .foo }}""), -+ labels.Labels{{Name: ""foo"", Value: ""5.3""}}, -+ []byte(""ceil is 6 and floor is 5""), -+ labels.Labels{{Name: ""foo"", Value: ""5.3""}}, -+ }, -+ { -+ ""mod"", -+ newMustLineFormatter(""mod is {{ mod .foo 3 }}""), -+ labels.Labels{{Name: ""foo"", Value: ""20""}}, -+ []byte(""mod is 2""), -+ labels.Labels{{Name: ""foo"", Value: ""20""}}, -+ }, -+ { -+ ""float64int"", -+ newMustLineFormatter(""{{ \""2.5\"" | float64 | int | add 10}}""), -+ labels.Labels{{Name: ""foo"", Value: ""2.5""}}, -+ []byte(""12""), -+ labels.Labels{{Name: ""foo"", Value: ""2.5""}}, -+ }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { -@@ -202,6 +258,12 @@ func Test_labelsFormatter_Format(t *testing.T) { - labels.Labels{{Name: ""foo"", Value: ""blip""}, {Name: ""bar"", Value: ""blop""}}, - labels.Labels{{Name: ""blip"", Value: ""BLIP and blop""}, {Name: ""bar"", Value: ""blip""}}, - }, -+ { -+ ""math"", -+ 
mustNewLabelsFormatter([]LabelFmt{NewTemplateLabelFmt(""status"", ""{{div .status 100 }}"")}), -+ labels.Labels{{Name: ""status"", Value: ""200""}}, -+ labels.Labels{{Name: ""status"", Value: ""2""}}, -+ }, - } - - for _, tt := range tests { -diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml -new file mode 100644 -index 0000000000000..4025e01ec4a9b ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/.travis.yml -@@ -0,0 +1,18 @@ -+language: go -+ -+go: -+ - 1.6 -+ - 1.7 -+ - 1.8 -+ - tip -+ -+script: -+ - go test -v -+ -+notifications: -+ webhooks: -+ urls: -+ - https://webhooks.gitter.im/e/06e3328629952dabe3e0 -+ on_success: change # options: [always|never|change] default: always -+ on_failure: always # options: [always|never|change] default: always -+ on_start: never # options: [always|never|change] default: always -diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md -new file mode 100644 -index 0000000000000..d700ec47f2b82 ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md -@@ -0,0 +1,8 @@ -+# 1.0.1 (2017-05-31) -+ -+## Fixed -+- #21: Fix generation of alphanumeric strings (thanks @dbarranco) -+ -+# 1.0.0 (2014-04-30) -+ -+- Initial release. -diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt -new file mode 100644 -index 0000000000000..d645695673349 ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt -@@ -0,0 +1,202 @@ -+ -+ Apache License -+ Version 2.0, January 2004 -+ http://www.apache.org/licenses/ -+ -+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -+ -+ 1. Definitions. -+ -+ ""License"" shall mean the terms and conditions for use, reproduction, -+ and distribution as defined by Sections 1 through 9 of this document. -+ -+ ""Licensor"" shall mean the copyright owner or entity authorized by -+ the copyright owner that is granting the License. -+ -+ ""Legal Entity"" shall mean the union of the acting entity and all -+ other entities that control, are controlled by, or are under common -+ control with that entity. For the purposes of this definition, -+ ""control"" means (i) the power, direct or indirect, to cause the -+ direction or management of such entity, whether by contract or -+ otherwise, or (ii) ownership of fifty percent (50%) or more of the -+ outstanding shares, or (iii) beneficial ownership of such entity. -+ -+ ""You"" (or ""Your"") shall mean an individual or Legal Entity -+ exercising permissions granted by this License. -+ -+ ""Source"" form shall mean the preferred form for making modifications, -+ including but not limited to software source code, documentation -+ source, and configuration files. -+ -+ ""Object"" form shall mean any form resulting from mechanical -+ transformation or translation of a Source form, including but -+ not limited to compiled object code, generated documentation, -+ and conversions to other media types. -+ -+ ""Work"" shall mean the work of authorship, whether in Source or -+ Object form, made available under the License, as indicated by a -+ copyright notice that is included in or attached to the work -+ (an example is provided in the Appendix below). 
-+ -+ ""Derivative Works"" shall mean any work, whether in Source or Object -+ form, that is based on (or derived from) the Work and for which the -+ editorial revisions, annotations, elaborations, or other modifications -+ represent, as a whole, an original work of authorship. For the purposes -+ of this License, Derivative Works shall not include works that remain -+ separable from, or merely link (or bind by name) to the interfaces of, -+ the Work and Derivative Works thereof. -+ -+ ""Contribution"" shall mean any work of authorship, including -+ the original version of the Work and any modifications or additions -+ to that Work or Derivative Works thereof, that is intentionally -+ submitted to Licensor for inclusion in the Work by the copyright owner -+ or by an individual or Legal Entity authorized to submit on behalf of -+ the copyright owner. For the purposes of this definition, ""submitted"" -+ means any form of electronic, verbal, or written communication sent -+ to the Licensor or its representatives, including but not limited to -+ communication on electronic mailing lists, source code control systems, -+ and issue tracking systems that are managed by, or on behalf of, the -+ Licensor for the purpose of discussing and improving the Work, but -+ excluding communication that is conspicuously marked or otherwise -+ designated in writing by the copyright owner as ""Not a Contribution."" -+ -+ ""Contributor"" shall mean Licensor and any individual or Legal Entity -+ on behalf of whom a Contribution has been received by Licensor and -+ subsequently incorporated within the Work. -+ -+ 2. Grant of Copyright License. Subject to the terms and conditions of -+ this License, each Contributor hereby grants to You a perpetual, -+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable -+ copyright license to reproduce, prepare Derivative Works of, -+ publicly display, publicly perform, sublicense, and distribute the -+ Work and such Derivative Works in Source or Object form. -+ -+ 3. Grant of Patent License. Subject to the terms and conditions of -+ this License, each Contributor hereby grants to You a perpetual, -+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable -+ (except as stated in this section) patent license to make, have made, -+ use, offer to sell, sell, import, and otherwise transfer the Work, -+ where such license applies only to those patent claims licensable -+ by such Contributor that are necessarily infringed by their -+ Contribution(s) alone or by combination of their Contribution(s) -+ with the Work to which such Contribution(s) was submitted. If You -+ institute patent litigation against any entity (including a -+ cross-claim or counterclaim in a lawsuit) alleging that the Work -+ or a Contribution incorporated within the Work constitutes direct -+ or contributory patent infringement, then any patent licenses -+ granted to You under this License for that Work shall terminate -+ as of the date such litigation is filed. -+ -+ 4. Redistribution. 
You may reproduce and distribute copies of the -+ Work or Derivative Works thereof in any medium, with or without -+ modifications, and in Source or Object form, provided that You -+ meet the following conditions: -+ -+ (a) You must give any other recipients of the Work or -+ Derivative Works a copy of this License; and -+ -+ (b) You must cause any modified files to carry prominent notices -+ stating that You changed the files; and -+ -+ (c) You must retain, in the Source form of any Derivative Works -+ that You distribute, all copyright, patent, trademark, and -+ attribution notices from the Source form of the Work, -+ excluding those notices that do not pertain to any part of -+ the Derivative Works; and -+ -+ (d) If the Work includes a ""NOTICE"" text file as part of its -+ distribution, then any Derivative Works that You distribute must -+ include a readable copy of the attribution notices contained -+ within such NOTICE file, excluding those notices that do not -+ pertain to any part of the Derivative Works, in at least one -+ of the following places: within a NOTICE text file distributed -+ as part of the Derivative Works; within the Source form or -+ documentation, if provided along with the Derivative Works; or, -+ within a display generated by the Derivative Works, if and -+ wherever such third-party notices normally appear. The contents -+ of the NOTICE file are for informational purposes only and -+ do not modify the License. You may add Your own attribution -+ notices within Derivative Works that You distribute, alongside -+ or as an addendum to the NOTICE text from the Work, provided -+ that such additional attribution notices cannot be construed -+ as modifying the License. -+ -+ You may add Your own copyright statement to Your modifications and -+ may provide additional or different license terms and conditions -+ for use, reproduction, or distribution of Your modifications, or -+ for any such Derivative Works as a whole, provided Your use, -+ reproduction, and distribution of the Work otherwise complies with -+ the conditions stated in this License. -+ -+ 5. Submission of Contributions. Unless You explicitly state otherwise, -+ any Contribution intentionally submitted for inclusion in the Work -+ by You to the Licensor shall be under the terms and conditions of -+ this License, without any additional terms or conditions. -+ Notwithstanding the above, nothing herein shall supersede or modify -+ the terms of any separate license agreement you may have executed -+ with Licensor regarding such Contributions. -+ -+ 6. Trademarks. This License does not grant permission to use the trade -+ names, trademarks, service marks, or product names of the Licensor, -+ except as required for reasonable and customary use in describing the -+ origin of the Work and reproducing the content of the NOTICE file. -+ -+ 7. Disclaimer of Warranty. Unless required by applicable law or -+ agreed to in writing, Licensor provides the Work (and each -+ Contributor provides its Contributions) on an ""AS IS"" BASIS, -+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -+ implied, including, without limitation, any warranties or conditions -+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -+ PARTICULAR PURPOSE. You are solely responsible for determining the -+ appropriateness of using or redistributing the Work and assume any -+ risks associated with Your exercise of permissions under this License. -+ -+ 8. Limitation of Liability. 
In no event and under no legal theory, -+ whether in tort (including negligence), contract, or otherwise, -+ unless required by applicable law (such as deliberate and grossly -+ negligent acts) or agreed to in writing, shall any Contributor be -+ liable to You for damages, including any direct, indirect, special, -+ incidental, or consequential damages of any character arising as a -+ result of this License or out of the use or inability to use the -+ Work (including but not limited to damages for loss of goodwill, -+ work stoppage, computer failure or malfunction, or any and all -+ other commercial damages or losses), even if such Contributor -+ has been advised of the possibility of such damages. -+ -+ 9. Accepting Warranty or Additional Liability. While redistributing -+ the Work or Derivative Works thereof, You may choose to offer, -+ and charge a fee for, acceptance of support, warranty, indemnity, -+ or other liability obligations and/or rights consistent with this -+ License. However, in accepting such obligations, You may act only -+ on Your own behalf and on Your sole responsibility, not on behalf -+ of any other Contributor, and only if You agree to indemnify, -+ defend, and hold each Contributor harmless for any liability -+ incurred by, or claims asserted against, such Contributor by reason -+ of your accepting any such warranty or additional liability. -+ -+ END OF TERMS AND CONDITIONS -+ -+ APPENDIX: How to apply the Apache License to your work. -+ -+ To apply the Apache License to your work, attach the following -+ boilerplate notice, with the fields enclosed by brackets ""[]"" -+ replaced with your own identifying information. (Don't include -+ the brackets!) The text should be enclosed in the appropriate -+ comment syntax for the file format. We also recommend that a -+ file or class name and description of purpose be included on the -+ same ""printed page"" as the copyright notice for easier -+ identification within third-party archives. -+ -+ Copyright [yyyy] [name of copyright owner] -+ -+ Licensed under the Apache License, Version 2.0 (the ""License""); -+ you may not use this file except in compliance with the License. -+ You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+ Unless required by applicable law or agreed to in writing, software -+ distributed under the License is distributed on an ""AS IS"" BASIS, -+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+ See the License for the specific language governing permissions and -+ limitations under the License. -diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md -new file mode 100644 -index 0000000000000..163ffe72a82d1 ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/README.md -@@ -0,0 +1,70 @@ -+GoUtils -+=========== -+[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) -+[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) -+ -+ -+GoUtils provides users with utility functions to manipulate strings in various ways. 
It is a Go implementation of some -+string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: -+* WordUtils -+* RandomStringUtils -+* StringUtils (partial implementation) -+ -+## Installation -+If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: -+ -+ go get github.com/Masterminds/goutils -+ -+If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. -+ -+ -+## Documentation -+GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) -+ -+ -+## Usage -+The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). -+ -+ package main -+ -+ import ( -+ ""fmt"" -+ ""github.com/Masterminds/goutils"" -+ ) -+ -+ func main() { -+ -+ // EXAMPLE 1: A goutils function which returns no errors -+ fmt.Println (goutils.Initials(""John Doe Foo"")) // Prints out ""JDF"" -+ -+ } -+Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). -+ -+ package main -+ -+ import ( -+ ""fmt"" -+ ""github.com/Masterminds/goutils"" -+ ) -+ -+ func main() { -+ -+ // EXAMPLE 2: A goutils function which returns an error -+ rand1, err1 := goutils.Random (-1, 0, 0, true, true) -+ -+ if err1 != nil { -+ fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) -+ } else { -+ fmt.Println(rand1) -+ } -+ -+ } -+ -+## License -+GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. -+ -+## Issue Reporting -+Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues -+ -+## Website -+* [GoUtils webpage](http://Masterminds.github.io/goutils/) -diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml -new file mode 100644 -index 0000000000000..657564a8474df ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/appveyor.yml -@@ -0,0 +1,21 @@ -+version: build-{build}.{branch} -+ -+clone_folder: C:\gopath\src\github.com\Masterminds\goutils -+shallow_clone: true -+ -+environment: -+ GOPATH: C:\gopath -+ -+platform: -+ - x64 -+ -+build: off -+ -+install: -+ - go version -+ - go env -+ -+test_script: -+ - go test -v -+ -+deploy: off -diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go -new file mode 100644 -index 0000000000000..8dbd9248583a8 ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go -@@ -0,0 +1,230 @@ -+/* -+Copyright 2014 Alexander Okoli -+ -+Licensed under the Apache License, Version 2.0 (the ""License""); -+you may not use this file except in compliance with the License. 
-+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an ""AS IS"" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. -+*/ -+ -+package goutils -+ -+import ( -+ ""crypto/rand"" -+ ""fmt"" -+ ""math"" -+ ""math/big"" -+ ""unicode"" -+) -+ -+/* -+CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -+*/ -+func CryptoRandomNonAlphaNumeric(count int) (string, error) { -+ return CryptoRandomAlphaNumericCustom(count, false, false) -+} -+ -+/* -+CryptoRandomAscii creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -+*/ -+func CryptoRandomAscii(count int) (string, error) { -+ return CryptoRandom(count, 32, 127, false, false) -+} -+ -+/* -+CryptoRandomNumeric creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of numeric characters. -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -+*/ -+func CryptoRandomNumeric(count int) (string, error) { -+ return CryptoRandom(count, 0, 0, false, true) -+} -+ -+/* -+CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. -+ -+Parameters: -+ count - the length of random string to create -+ letters - if true, generated string may include alphabetic characters -+ numbers - if true, generated string may include numeric characters -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -+*/ -+func CryptoRandomAlphabetic(count int) (string, error) { -+ return CryptoRandom(count, 0, 0, true, false) -+} -+ -+/* -+CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of alpha-numeric characters. -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -+*/ -+func CryptoRandomAlphaNumeric(count int) (string, error) { -+ return CryptoRandom(count, 0, 0, true, true) -+} -+ -+/* -+CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. 
-+ -+Parameters: -+ count - the length of random string to create -+ letters - if true, generated string may include alphabetic characters -+ numbers - if true, generated string may include numeric characters -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -+*/ -+func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { -+ return CryptoRandom(count, 0, 0, letters, numbers) -+} -+ -+/* -+CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness. -+If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -+unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -+If chars is not nil, characters stored in chars that are between start and end are chosen. -+ -+Parameters: -+ count - the length of random string to create -+ start - the position in set of chars (ASCII/Unicode int) to start at -+ end - the position in set of chars (ASCII/Unicode int) to end before -+ letters - if true, generated string may include alphabetic characters -+ numbers - if true, generated string may include numeric characters -+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. -+ -+Returns: -+ string - the random string -+ error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -+*/ -+func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { -+ if count == 0 { -+ return """", nil -+ } else if count < 0 { -+ err := fmt.Errorf(""randomstringutils illegal argument: Requested random string length %v is less than 0."", count) // equiv to err := errors.New(""..."") -+ return """", err -+ } -+ if chars != nil && len(chars) == 0 { -+ err := fmt.Errorf(""randomstringutils illegal argument: The chars array must not be empty"") -+ return """", err -+ } -+ -+ if start == 0 && end == 0 { -+ if chars != nil { -+ end = len(chars) -+ } else { -+ if !letters && !numbers { -+ end = math.MaxInt32 -+ } else { -+ end = 'z' + 1 -+ start = ' ' -+ } -+ } -+ } else { -+ if end <= start { -+ err := fmt.Errorf(""randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)"", end, start) -+ return """", err -+ } -+ -+ if chars != nil && end > len(chars) { -+ err := fmt.Errorf(""randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)"", end, len(chars)) -+ return """", err -+ } -+ } -+ -+ buffer := make([]rune, count) -+ gap := end - start -+ -+ // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 -+ // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 -+ -+ for count != 0 { -+ count-- -+ var ch rune -+ if chars == nil { -+ ch = rune(getCryptoRandomInt(gap) + int64(start)) -+ } else { -+ ch = chars[getCryptoRandomInt(gap)+int64(start)] -+ } -+ -+ if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { -+ if ch >= 56320 && ch <= 57343 { // low surrogate range -+ if count == 0 { -+ count++ -+ } else { -+ // Insert low surrogate -+ buffer[count] = ch -+ count-- -+ // Insert high surrogate -+ buffer[count] = rune(55296 + getCryptoRandomInt(128)) -+ } -+ } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) -+ if count 
== 0 { -+ count++ -+ } else { -+ // Insert low surrogate -+ buffer[count] = rune(56320 + getCryptoRandomInt(128)) -+ count-- -+ // Insert high surrogate -+ buffer[count] = ch -+ } -+ } else if ch >= 56192 && ch <= 56319 { -+ // private high surrogate, skip it -+ count++ -+ } else { -+ // not one of the surrogates* -+ buffer[count] = ch -+ } -+ } else { -+ count++ -+ } -+ } -+ return string(buffer), nil -+} -+ -+func getCryptoRandomInt(count int) int64 { -+ nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) -+ if err != nil { -+ panic(err) -+ } -+ return nBig.Int64() -+} -diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go -new file mode 100644 -index 0000000000000..272670231ab14 ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go -@@ -0,0 +1,248 @@ -+/* -+Copyright 2014 Alexander Okoli -+ -+Licensed under the Apache License, Version 2.0 (the ""License""); -+you may not use this file except in compliance with the License. -+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an ""AS IS"" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. -+*/ -+ -+package goutils -+ -+import ( -+ ""fmt"" -+ ""math"" -+ ""math/rand"" -+ ""time"" -+ ""unicode"" -+) -+ -+// RANDOM provides the time-based seed used to generate random numbers -+var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) -+ -+/* -+RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -+*/ -+func RandomNonAlphaNumeric(count int) (string, error) { -+ return RandomAlphaNumericCustom(count, false, false) -+} -+ -+/* -+RandomAscii creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -+*/ -+func RandomAscii(count int) (string, error) { -+ return Random(count, 32, 127, false, false) -+} -+ -+/* -+RandomNumeric creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of numeric characters. -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -+*/ -+func RandomNumeric(count int) (string, error) { -+ return Random(count, 0, 0, false, true) -+} -+ -+/* -+RandomAlphabetic creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of alphabetic characters. 
-+ -+Parameters: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -+*/ -+func RandomAlphabetic(count int) (string, error) { -+ return Random(count, 0, 0, true, false) -+} -+ -+/* -+RandomAlphaNumeric creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of alpha-numeric characters. -+ -+Parameter: -+ count - the length of random string to create -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -+*/ -+func RandomAlphaNumeric(count int) (string, error) { -+ return Random(count, 0, 0, true, true) -+} -+ -+/* -+RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. -+ -+Parameters: -+ count - the length of random string to create -+ letters - if true, generated string may include alphabetic characters -+ numbers - if true, generated string may include numeric characters -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -+*/ -+func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { -+ return Random(count, 0, 0, letters, numbers) -+} -+ -+/* -+Random creates a random string based on a variety of options, using default source of randomness. -+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but -+instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. -+ -+Parameters: -+ count - the length of random string to create -+ start - the position in set of chars (ASCII/Unicode int) to start at -+ end - the position in set of chars (ASCII/Unicode int) to end before -+ letters - if true, generated string may include alphabetic characters -+ numbers - if true, generated string may include numeric characters -+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. -+ -+Returns: -+ string - the random string -+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -+*/ -+func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { -+ return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) -+} -+ -+/* -+RandomSeed creates a random string based on a variety of options, using supplied source of randomness. -+If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -+unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -+If chars is not nil, characters stored in chars that are between start and end are chosen. -+This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance -+with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. 
-+ -+Parameters: -+ count - the length of random string to create -+ start - the position in set of chars (ASCII/Unicode decimals) to start at -+ end - the position in set of chars (ASCII/Unicode decimals) to end before -+ letters - if true, generated string may include alphabetic characters -+ numbers - if true, generated string may include numeric characters -+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. -+ random - a source of randomness. -+ -+Returns: -+ string - the random string -+ error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -+*/ -+func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { -+ -+ if count == 0 { -+ return """", nil -+ } else if count < 0 { -+ err := fmt.Errorf(""randomstringutils illegal argument: Requested random string length %v is less than 0."", count) // equiv to err := errors.New(""..."") -+ return """", err -+ } -+ if chars != nil && len(chars) == 0 { -+ err := fmt.Errorf(""randomstringutils illegal argument: The chars array must not be empty"") -+ return """", err -+ } -+ -+ if start == 0 && end == 0 { -+ if chars != nil { -+ end = len(chars) -+ } else { -+ if !letters && !numbers { -+ end = math.MaxInt32 -+ } else { -+ end = 'z' + 1 -+ start = ' ' -+ } -+ } -+ } else { -+ if end <= start { -+ err := fmt.Errorf(""randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)"", end, start) -+ return """", err -+ } -+ -+ if chars != nil && end > len(chars) { -+ err := fmt.Errorf(""randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)"", end, len(chars)) -+ return """", err -+ } -+ } -+ -+ buffer := make([]rune, count) -+ gap := end - start -+ -+ // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 -+ // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 -+ -+ for count != 0 { -+ count-- -+ var ch rune -+ if chars == nil { -+ ch = rune(random.Intn(gap) + start) -+ } else { -+ ch = chars[random.Intn(gap)+start] -+ } -+ -+ if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { -+ if ch >= 56320 && ch <= 57343 { // low surrogate range -+ if count == 0 { -+ count++ -+ } else { -+ // Insert low surrogate -+ buffer[count] = ch -+ count-- -+ // Insert high surrogate -+ buffer[count] = rune(55296 + random.Intn(128)) -+ } -+ } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) -+ if count == 0 { -+ count++ -+ } else { -+ // Insert low surrogate -+ buffer[count] = rune(56320 + random.Intn(128)) -+ count-- -+ // Insert high surrogate -+ buffer[count] = ch -+ } -+ } else if ch >= 56192 && ch <= 56319 { -+ // private high surrogate, skip it -+ count++ -+ } else { -+ // not one of the surrogates* -+ buffer[count] = ch -+ } -+ } else { -+ count++ -+ } -+ } -+ return string(buffer), nil -+} -diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go -new file mode 100644 -index 0000000000000..741bb530e8ad7 ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/stringutils.go -@@ -0,0 +1,240 @@ -+/* -+Copyright 2014 Alexander Okoli -+ -+Licensed under the Apache License, Version 2.0 (the ""License""); -+you may not use this file except in compliance with the License. 
-+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an ""AS IS"" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. -+*/ -+ -+package goutils -+ -+import ( -+ ""bytes"" -+ ""fmt"" -+ ""strings"" -+ ""unicode"" -+) -+ -+// Typically returned by functions where a searched item cannot be found -+const INDEX_NOT_FOUND = -1 -+ -+/* -+Abbreviate abbreviates a string using ellipses. This will turn the string ""Now is the time for all good men"" into ""Now is the time for..."" -+ -+Specifically, the algorithm is as follows: -+ -+ - If str is less than maxWidth characters long, return it. -+ - Else abbreviate it to (str[0:maxWidth - 3] + ""...""). -+ - If maxWidth is less than 4, return an illegal argument error. -+ - In no case will it return a string of length greater than maxWidth. -+ -+Parameters: -+ str - the string to check -+ maxWidth - maximum length of result string, must be at least 4 -+ -+Returns: -+ string - abbreviated string -+ error - if the width is too small -+*/ -+func Abbreviate(str string, maxWidth int) (string, error) { -+ return AbbreviateFull(str, 0, maxWidth) -+} -+ -+/* -+AbbreviateFull abbreviates a string using ellipses. This will turn the string ""Now is the time for all good men"" into ""...is the time for..."" -+This function works like Abbreviate(string, int), but allows you to specify a ""left edge"" offset. Note that this left edge is not -+necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear -+somewhere in the result. -+In no case will it return a string of length greater than maxWidth. -+ -+Parameters: -+ str - the string to check -+ offset - left edge of source string -+ maxWidth - maximum length of result string, must be at least 4 -+ -+Returns: -+ string - abbreviated string -+ error - if the width is too small -+*/ -+func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { -+ if str == """" { -+ return """", nil -+ } -+ if maxWidth < 4 { -+ err := fmt.Errorf(""stringutils illegal argument: Minimum abbreviation width is 4"") -+ return """", err -+ } -+ if len(str) <= maxWidth { -+ return str, nil -+ } -+ if offset > len(str) { -+ offset = len(str) -+ } -+ if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 -+ offset = len(str) - (maxWidth - 3) -+ } -+ abrevMarker := ""..."" -+ if offset <= 4 { -+ return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; -+ } -+ if maxWidth < 7 { -+ err := fmt.Errorf(""stringutils illegal argument: Minimum abbreviation width with offset is 7"") -+ return """", err -+ } -+ if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 -+ abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) -+ return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); -+ } -+ return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); -+} -+ -+/* -+DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). -+It returns the string without whitespaces. 
-+ -+Parameter: -+ str - the string to delete whitespace from, may be nil -+ -+Returns: -+ the string without whitespaces -+*/ -+func DeleteWhiteSpace(str string) string { -+ if str == """" { -+ return str -+ } -+ sz := len(str) -+ var chs bytes.Buffer -+ count := 0 -+ for i := 0; i < sz; i++ { -+ ch := rune(str[i]) -+ if !unicode.IsSpace(ch) { -+ chs.WriteRune(ch) -+ count++ -+ } -+ } -+ if count == sz { -+ return str -+ } -+ return chs.String() -+} -+ -+/* -+IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. -+ -+Parameters: -+ str1 - the first string -+ str2 - the second string -+ -+Returns: -+ the index where str1 and str2 begin to differ; -1 if they are equal -+*/ -+func IndexOfDifference(str1 string, str2 string) int { -+ if str1 == str2 { -+ return INDEX_NOT_FOUND -+ } -+ if IsEmpty(str1) || IsEmpty(str2) { -+ return 0 -+ } -+ var i int -+ for i = 0; i < len(str1) && i < len(str2); i++ { -+ if rune(str1[i]) != rune(str2[i]) { -+ break -+ } -+ } -+ if i < len(str2) || i < len(str1) { -+ return i -+ } -+ return INDEX_NOT_FOUND -+} -+ -+/* -+IsBlank checks if a string is whitespace or empty (""""). Observe the following behavior: -+ -+ goutils.IsBlank("""") = true -+ goutils.IsBlank("" "") = true -+ goutils.IsBlank(""bob"") = false -+ goutils.IsBlank("" bob "") = false -+ -+Parameter: -+ str - the string to check -+ -+Returns: -+ true - if the string is whitespace or empty ("""") -+*/ -+func IsBlank(str string) bool { -+ strLen := len(str) -+ if str == """" || strLen == 0 { -+ return true -+ } -+ for i := 0; i < strLen; i++ { -+ if unicode.IsSpace(rune(str[i])) == false { -+ return false -+ } -+ } -+ return true -+} -+ -+/* -+IndexOf returns the index of the first instance of sub in str, with the search beginning from the -+index start point specified. -1 is returned if sub is not present in str. -+ -+An empty string ("""") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. -+A start position greater than the string length returns -1. -+ -+Parameters: -+ str - the string to check -+ sub - the substring to find -+ start - the start position; negative treated as zero -+ -+Returns: -+ the first index where the sub string was found (always >= start) -+*/ -+func IndexOf(str string, sub string, start int) int { -+ -+ if start < 0 { -+ start = 0 -+ } -+ -+ if len(str) < start { -+ return INDEX_NOT_FOUND -+ } -+ -+ if IsEmpty(str) || IsEmpty(sub) { -+ return INDEX_NOT_FOUND -+ } -+ -+ partialIndex := strings.Index(str[start:len(str)], sub) -+ if partialIndex == -1 { -+ return INDEX_NOT_FOUND -+ } -+ return partialIndex + start -+} -+ -+// IsEmpty checks if a string is empty (""""). Returns true if empty, and false otherwise. -+func IsEmpty(str string) bool { -+ return len(str) == 0 -+} -+ -+// Returns either the passed in string, or if the string is empty, the value of defaultStr. -+func DefaultString(str string, defaultStr string) string { -+ if IsEmpty(str) { -+ return defaultStr -+ } -+ return str -+} -+ -+// Returns either the passed in string, or if the string is whitespace, empty (""""), the value of defaultStr. 
-+func DefaultIfBlank(str string, defaultStr string) string { -+ if IsBlank(str) { -+ return defaultStr -+ } -+ return str -+} -diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go -new file mode 100644 -index 0000000000000..034cad8e2107d ---- /dev/null -+++ b/vendor/github.com/Masterminds/goutils/wordutils.go -@@ -0,0 +1,357 @@ -+/* -+Copyright 2014 Alexander Okoli -+ -+Licensed under the Apache License, Version 2.0 (the ""License""); -+you may not use this file except in compliance with the License. -+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an ""AS IS"" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. -+*/ -+ -+/* -+Package goutils provides utility functions to manipulate strings in various ways. -+The code snippets below show examples of how to use goutils. Some functions return -+errors while others do not, so usage would vary as a result. -+ -+Example: -+ -+ package main -+ -+ import ( -+ ""fmt"" -+ ""github.com/aokoli/goutils"" -+ ) -+ -+ func main() { -+ -+ // EXAMPLE 1: A goutils function which returns no errors -+ fmt.Println (goutils.Initials(""John Doe Foo"")) // Prints out ""JDF"" -+ -+ -+ -+ // EXAMPLE 2: A goutils function which returns an error -+ rand1, err1 := goutils.Random (-1, 0, 0, true, true) -+ -+ if err1 != nil { -+ fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) -+ } else { -+ fmt.Println(rand1) -+ } -+ } -+*/ -+package goutils -+ -+import ( -+ ""bytes"" -+ ""strings"" -+ ""unicode"" -+) -+ -+// VERSION indicates the current version of goutils -+const VERSION = ""1.0.0"" -+ -+/* -+Wrap wraps a single line of text, identifying words by ' '. -+New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. -+Leading spaces on a new line are stripped. Trailing spaces are not stripped. -+ -+Parameters: -+ str - the string to be word wrapped -+ wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 -+ -+Returns: -+ a line with newlines inserted -+*/ -+func Wrap(str string, wrapLength int) string { -+ return WrapCustom(str, wrapLength, """", false) -+} -+ -+/* -+WrapCustom wraps a single line of text, identifying words by ' '. -+Leading spaces on a new line are stripped. Trailing spaces are not stripped. -+ -+Parameters: -+ str - the string to be word wrapped -+ wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 -+ newLineStr - the string to insert for a new line, """" uses '\n' -+ wrapLongWords - true if long words (such as URLs) should be wrapped -+ -+Returns: -+ a line with newlines inserted -+*/ -+func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { -+ -+ if str == """" { -+ return """" -+ } -+ if newLineStr == """" { -+ newLineStr = ""\n"" // TODO Assumes ""\n"" is seperator. 
Explore SystemUtils.LINE_SEPARATOR from Apache Commons -+ } -+ if wrapLength < 1 { -+ wrapLength = 1 -+ } -+ -+ inputLineLength := len(str) -+ offset := 0 -+ -+ var wrappedLine bytes.Buffer -+ -+ for inputLineLength-offset > wrapLength { -+ -+ if rune(str[offset]) == ' ' { -+ offset++ -+ continue -+ } -+ -+ end := wrapLength + offset + 1 -+ spaceToWrapAt := strings.LastIndex(str[offset:end], "" "") + offset -+ -+ if spaceToWrapAt >= offset { -+ // normal word (not longer than wrapLength) -+ wrappedLine.WriteString(str[offset:spaceToWrapAt]) -+ wrappedLine.WriteString(newLineStr) -+ offset = spaceToWrapAt + 1 -+ -+ } else { -+ // long word or URL -+ if wrapLongWords { -+ end := wrapLength + offset -+ // long words are wrapped one line at a time -+ wrappedLine.WriteString(str[offset:end]) -+ wrappedLine.WriteString(newLineStr) -+ offset += wrapLength -+ } else { -+ // long words aren't wrapped, just extended beyond limit -+ end := wrapLength + offset -+ index := strings.IndexRune(str[end:len(str)], ' ') -+ if index == -1 { -+ wrappedLine.WriteString(str[offset:len(str)]) -+ offset = inputLineLength -+ } else { -+ spaceToWrapAt = index + end -+ wrappedLine.WriteString(str[offset:spaceToWrapAt]) -+ wrappedLine.WriteString(newLineStr) -+ offset = spaceToWrapAt + 1 -+ } -+ } -+ } -+ } -+ -+ wrappedLine.WriteString(str[offset:len(str)]) -+ -+ return wrappedLine.String() -+ -+} -+ -+/* -+Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. -+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). -+The delimiters represent a set of characters understood to separate words. The first string character -+and the first non-delimiter character after a delimiter will be capitalized. A """" input string returns """". -+Capitalization uses the Unicode title case, normally equivalent to upper case. -+ -+Parameters: -+ str - the string to capitalize -+ delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter -+ -+Returns: -+ capitalized string -+*/ -+func Capitalize(str string, delimiters ...rune) string { -+ -+ var delimLen int -+ -+ if delimiters == nil { -+ delimLen = -1 -+ } else { -+ delimLen = len(delimiters) -+ } -+ -+ if str == """" || delimLen == 0 { -+ return str -+ } -+ -+ buffer := []rune(str) -+ capitalizeNext := true -+ for i := 0; i < len(buffer); i++ { -+ ch := buffer[i] -+ if isDelimiter(ch, delimiters...) { -+ capitalizeNext = true -+ } else if capitalizeNext { -+ buffer[i] = unicode.ToTitle(ch) -+ capitalizeNext = false -+ } -+ } -+ return string(buffer) -+ -+} -+ -+/* -+CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a -+titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood -+to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. -+Capitalization uses the Unicode title case, normally equivalent to upper case. 
-+ -+Parameters: -+ str - the string to capitalize fully -+ delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter -+ -+Returns: -+ capitalized string -+*/ -+func CapitalizeFully(str string, delimiters ...rune) string { -+ -+ var delimLen int -+ -+ if delimiters == nil { -+ delimLen = -1 -+ } else { -+ delimLen = len(delimiters) -+ } -+ -+ if str == """" || delimLen == 0 { -+ return str -+ } -+ str = strings.ToLower(str) -+ return Capitalize(str, delimiters...) -+} -+ -+/* -+Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. -+The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter -+character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). -+ -+Parameters: -+ str - the string to uncapitalize fully -+ delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter -+ -+Returns: -+ uncapitalized string -+*/ -+func Uncapitalize(str string, delimiters ...rune) string { -+ -+ var delimLen int -+ -+ if delimiters == nil { -+ delimLen = -1 -+ } else { -+ delimLen = len(delimiters) -+ } -+ -+ if str == """" || delimLen == 0 { -+ return str -+ } -+ -+ buffer := []rune(str) -+ uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. -+ for i := 0; i < len(buffer); i++ { -+ ch := buffer[i] -+ if isDelimiter(ch, delimiters...) { -+ uncapitalizeNext = true -+ } else if uncapitalizeNext { -+ buffer[i] = unicode.ToLower(ch) -+ uncapitalizeNext = false -+ } -+ } -+ return string(buffer) -+} -+ -+/* -+SwapCase swaps the case of a string using a word based algorithm. -+ -+Conversion algorithm: -+ -+ Upper case character converts to Lower case -+ Title case character converts to Lower case -+ Lower case character after Whitespace or at start converts to Title case -+ Other Lower case character converts to Upper case -+ Whitespace is defined by unicode.IsSpace(char). -+ -+Parameters: -+ str - the string to swap case -+ -+Returns: -+ the changed string -+*/ -+func SwapCase(str string) string { -+ if str == """" { -+ return str -+ } -+ buffer := []rune(str) -+ -+ whitespace := true -+ -+ for i := 0; i < len(buffer); i++ { -+ ch := buffer[i] -+ if unicode.IsUpper(ch) { -+ buffer[i] = unicode.ToLower(ch) -+ whitespace = false -+ } else if unicode.IsTitle(ch) { -+ buffer[i] = unicode.ToLower(ch) -+ whitespace = false -+ } else if unicode.IsLower(ch) { -+ if whitespace { -+ buffer[i] = unicode.ToTitle(ch) -+ whitespace = false -+ } else { -+ buffer[i] = unicode.ToUpper(ch) -+ } -+ } else { -+ whitespace = unicode.IsSpace(ch) -+ } -+ } -+ return string(buffer) -+} -+ -+/* -+Initials extracts the initial letters from each word in the string. The first letter of the string and all first -+letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters -+parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. 
-+ -+Parameters: -+ str - the string to get initials from -+ delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter -+Returns: -+ string of initial letters -+*/ -+func Initials(str string, delimiters ...rune) string { -+ if str == """" { -+ return str -+ } -+ if delimiters != nil && len(delimiters) == 0 { -+ return """" -+ } -+ strLen := len(str) -+ var buf bytes.Buffer -+ lastWasGap := true -+ for i := 0; i < strLen; i++ { -+ ch := rune(str[i]) -+ -+ if isDelimiter(ch, delimiters...) { -+ lastWasGap = true -+ } else if lastWasGap { -+ buf.WriteRune(ch) -+ lastWasGap = false -+ } -+ } -+ return buf.String() -+} -+ -+// private function (lower case func name) -+func isDelimiter(ch rune, delimiters ...rune) bool { -+ if delimiters == nil { -+ return unicode.IsSpace(ch) -+ } -+ for _, delimiter := range delimiters { -+ if ch == delimiter { -+ return true -+ } -+ } -+ return false -+} -diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore -new file mode 100644 -index 0000000000000..6b061e6174b3e ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/.gitignore -@@ -0,0 +1 @@ -+_fuzz/ -\ No newline at end of file -diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml -new file mode 100644 -index 0000000000000..fdbdf1448c366 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml -@@ -0,0 +1,26 @@ -+run: -+ deadline: 2m -+ -+linters: -+ disable-all: true -+ enable: -+ - deadcode -+ - dupl -+ - errcheck -+ - gofmt -+ - goimports -+ - golint -+ - gosimple -+ - govet -+ - ineffassign -+ - misspell -+ - nakedret -+ - structcheck -+ - unused -+ - varcheck -+ -+linters-settings: -+ gofmt: -+ simplify: true -+ dupl: -+ threshold: 400 -diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md -new file mode 100644 -index 0000000000000..1f90c38d260d0 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md -@@ -0,0 +1,194 @@ -+# Changelog -+ -+## 3.1.1 (2020-11-23) -+ -+### Fixed -+ -+- #158: Fixed issue with generated regex operation order that could cause problem -+ -+## 3.1.0 (2020-04-15) -+ -+### Added -+ -+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) -+ -+### Changed -+ -+- #148: More accurate validation messages on constraints -+ -+## 3.0.3 (2019-12-13) -+ -+### Fixed -+ -+- #141: Fixed issue with <= comparison -+ -+## 3.0.2 (2019-11-14) -+ -+### Fixed -+ -+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) -+ -+## 3.0.1 (2019-09-13) -+ -+### Fixed -+ -+- #125: Fixes issue with module path for v3 -+ -+## 3.0.0 (2019-09-12) -+ -+This is a major release of the semver package which includes API changes. The Go -+API is compatible with ^1. The Go API was not changed because many people are using -+`go get` without Go modules for their applications and API breaking changes cause -+errors which we have or would need to support. -+ -+The changes in this release are the handling based on the data passed into the -+functions. These are described in the added and changed sections below. -+ -+### Added -+ -+- StrictNewVersion function. This is similar to NewVersion but will return an -+ error if the version passed in is not a strict semantic version. 
For example, -+ 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly -+ speaking semantic versions. This function is faster, performs fewer operations, -+ and uses fewer allocations than NewVersion. -+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. -+ The Makefile contains the operations used. For more information on you can start -+ on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing -+- Now using Go modules -+ -+### Changed -+ -+- NewVersion has proper prerelease and metadata validation with error messages -+ to signal an issue with either of them -+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the -+ version is >=1 the ^ ranges works the same as v1. For major versions of 0 the -+ rules have changed. The minor version is treated as the stable version unless -+ a patch is specified and then it is equivalent to =. One difference from npm/js -+ is that prereleases there are only to a specific version (e.g. 1.2.3). -+ Prereleases here look over multiple versions and follow semantic version -+ ordering rules. This pattern now follows along with the expected and requested -+ handling of this packaged by numerous users. -+ -+## 1.5.0 (2019-09-11) -+ -+### Added -+ -+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) -+ -+### Changed -+ -+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) -+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) -+- #72: Adding docs comment pointing to vert for a cli -+- #71: Update the docs on pre-release comparator handling -+- #89: Test with new go versions (thanks @thedevsaddam) -+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) -+ -+### Fixed -+ -+- #78: Fix unchecked error in example code (thanks @ravron) -+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case -+- #97: Fixed copyright file for proper display on GitHub -+- #107: Fix handling prerelease when sorting alphanum and num -+- #109: Fixed where Validate sometimes returns wrong message on error -+ -+## 1.4.2 (2018-04-10) -+ -+### Changed -+ -+- #72: Updated the docs to point to vert for a console appliaction -+- #71: Update the docs on pre-release comparator handling -+ -+### Fixed -+ -+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case -+ -+## 1.4.1 (2018-04-02) -+ -+### Fixed -+ -+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) -+ -+## 1.4.0 (2017-10-04) -+ -+### Changed -+ -+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) -+ -+## 1.3.1 (2017-07-10) -+ -+### Fixed -+ -+- Fixed #57: number comparisons in prerelease sometimes inaccurate -+ -+## 1.3.0 (2017-05-02) -+ -+### Added -+ -+- #45: Added json (un)marshaling support (thanks @mh-cbon) -+- Stability marker. See https://masterminds.github.io/stability/ -+ -+### Fixed -+ -+- #51: Fix handling of single digit tilde constraint (thanks @dgodd) -+ -+### Changed -+ -+- #55: The godoc icon moved from png to svg -+ -+## 1.2.3 (2017-04-03) -+ -+### Fixed -+ -+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * -+ -+## Release 1.2.2 (2016-12-13) -+ -+### Fixed -+ -+- #34: Fixed issue where hyphen range was not working with pre-release parsing. -+ -+## Release 1.2.1 (2016-11-28) -+ -+### Fixed -+ -+- #24: Fixed edge case issue where constraint ""> 0"" does not handle ""0.0.1-alpha"" -+ properly. 
-+ -+## Release 1.2.0 (2016-11-04) -+ -+### Added -+ -+- #20: Added MustParse function for versions (thanks @adamreese) -+- #15: Added increment methods on versions (thanks @mh-cbon) -+ -+### Fixed -+ -+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and -+ might not satisfy the intended compatibility. The change here ignores pre-releases -+ on constraint checks (e.g., ~ or ^) when a pre-release is not part of the -+ constraint. For example, `^1.2.3` will ignore pre-releases while -+ `^1.2.3-alpha` will include them. -+ -+## Release 1.1.1 (2016-06-30) -+ -+### Changed -+ -+- Issue #9: Speed up version comparison performance (thanks @sdboyer) -+- Issue #8: Added benchmarks (thanks @sdboyer) -+- Updated Go Report Card URL to new location -+- Updated Readme to add code snippet formatting (thanks @mh-cbon) -+- Updating tagging to v[SemVer] structure for compatibility with other tools. -+ -+## Release 1.1.0 (2016-03-11) -+ -+- Issue #2: Implemented validation to provide reasons a versions failed a -+ constraint. -+ -+## Release 1.0.1 (2015-12-31) -+ -+- Fixed #1: * constraint failing on valid versions. -+ -+## Release 1.0.0 (2015-10-20) -+ -+- Initial release -diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt -new file mode 100644 -index 0000000000000..9ff7da9c48b67 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt -@@ -0,0 +1,19 @@ -+Copyright (C) 2014-2019, Matt Butcher and Matt Farina -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the ""Software""), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile -new file mode 100644 -index 0000000000000..eac19178fbd1b ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/Makefile -@@ -0,0 +1,37 @@ -+GOPATH=$(shell go env GOPATH) -+GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint -+GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build -+GOFUZZ = $(GOPATH)/bin/go-fuzz -+ -+.PHONY: lint -+lint: $(GOLANGCI_LINT) -+ @echo ""==> Linting codebase"" -+ @$(GOLANGCI_LINT) run -+ -+.PHONY: test -+test: -+ @echo ""==> Running tests"" -+ GO111MODULE=on go test -v -+ -+.PHONY: test-cover -+test-cover: -+ @echo ""==> Running Tests with coverage"" -+ GO111MODULE=on go test -cover . -+ -+.PHONY: fuzz -+fuzz: $(GOFUZZBUILD) $(GOFUZZ) -+ @echo ""==> Fuzz testing"" -+ $(GOFUZZBUILD) -+ $(GOFUZZ) -workdir=_fuzz -+ -+$(GOLANGCI_LINT): -+ # Install golangci-lint. 
The configuration for it is in the .golangci.yml -+ # file in the root of the repository -+ echo ${GOPATH} -+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 -+ -+$(GOFUZZBUILD): -+ cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build -+ -+$(GOFUZZ): -+ cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep -\ No newline at end of file -diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md -new file mode 100644 -index 0000000000000..d8f54dcbd3c69 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/README.md -@@ -0,0 +1,244 @@ -+# SemVer -+ -+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: -+ -+* Parse semantic versions -+* Sort semantic versions -+* Check if a semantic version fits within a set of constraints -+* Optionally work with a `v` prefix -+ -+[![Stability: -+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) -+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) -+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) -+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) -+ -+If you are looking for a command line tool for version comparisons please see -+[vert](https://github.com/Masterminds/vert) which uses this library. -+ -+## Package Versions -+ -+There are three major versions fo the `semver` package. -+ -+* 3.x.x is the new stable and active version. This version is focused on constraint -+ compatibility for range handling in other tools from other languages. It has -+ a similar API to the v1 releases. The development of this version is on the master -+ branch. The documentation for this version is below. -+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are -+ no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). -+ There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). -+* 1.x.x is the most widely used version with numerous tagged releases. This is the -+ previous stable and is still maintained for bug fixes. The development, to fix -+ bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). -+ -+## Parsing Semantic Versions -+ -+There are two functions that can parse semantic versions. The `StrictNewVersion` -+function only parses valid version 2 semantic versions as outlined in the -+specification. The `NewVersion` function attempts to coerce a version into a -+semantic version and parse it. For example, if there is a leading v or a version -+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid -+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned -+that can be sorted, compared, and used in constraints. -+ -+When parsing a version an error is returned if there is an issue parsing the -+version. 
For example, -+ -+ v, err := semver.NewVersion(""1.2.3-beta.1+build345"") -+ -+The version object has methods to get the parts of the version, compare it to -+other versions, convert the version back into a string, and get the original -+string. Getting the original string is useful if the semantic version was coerced -+into a valid form. -+ -+## Sorting Semantic Versions -+ -+A set of versions can be sorted using the `sort` package from the standard library. -+For example, -+ -+```go -+raw := []string{""1.2.3"", ""1.0"", ""1.3"", ""2"", ""0.4.2"",} -+vs := make([]*semver.Version, len(raw)) -+for i, r := range raw { -+ v, err := semver.NewVersion(r) -+ if err != nil { -+ t.Errorf(""Error parsing version: %s"", err) -+ } -+ -+ vs[i] = v -+} -+ -+sort.Sort(semver.Collection(vs)) -+``` -+ -+## Checking Version Constraints -+ -+There are two methods for comparing versions. One uses comparison methods on -+`Version` instances and the other uses `Constraints`. There are some important -+differences to notes between these two methods of comparison. -+ -+1. When two versions are compared using functions such as `Compare`, `LessThan`, -+ and others it will follow the specification and always include prereleases -+ within the comparison. It will provide an answer that is valid with the -+ comparison section of the spec at https://semver.org/#spec-item-11 -+2. When constraint checking is used for checks or validation it will follow a -+ different set of rules that are common for ranges with tools like npm/js -+ and Rust/Cargo. This includes considering prereleases to be invalid if the -+ ranges does not include one. If you want to have it include pre-releases a -+ simple solution is to include `-0` in your range. -+3. Constraint ranges can have some complex rules including the shorthand use of -+ ~ and ^. For more details on those see the options below. -+ -+There are differences between the two methods or checking versions because the -+comparison methods on `Version` follow the specification while comparison ranges -+are not part of the specification. Different packages and tools have taken it -+upon themselves to come up with range rules. This has resulted in differences. -+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a -+different pattern for ^. The comparison features in this package follow the -+npm/js and Cargo/Rust lead because applications using it have followed similar -+patters with their versions. -+ -+Checking a version against version constraints is one of the most featureful -+parts of the package. -+ -+```go -+c, err := semver.NewConstraint("">= 1.2.3"") -+if err != nil { -+ // Handle constraint not being parsable. -+} -+ -+v, err := semver.NewVersion(""1.3"") -+if err != nil { -+ // Handle version not being parsable. -+} -+// Check if the version meets the constraints. The a variable will be true. -+a := c.Check(v) -+``` -+ -+### Basic Comparisons -+ -+There are two elements to the comparisons. First, a comparison string is a list -+of space or comma separated AND comparisons. These are then separated by || (OR) -+comparisons. For example, `"">= 1.2 < 3.0.0 || >= 4.2.3""` is looking for a -+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -+greater than or equal to 4.2.3. 
-+ -+The basic comparisons are: -+ -+* `=`: equal (aliased to no operator) -+* `!=`: not equal -+* `>`: greater than -+* `<`: less than -+* `>=`: greater than or equal to -+* `<=`: less than or equal to -+ -+### Working With Prerelease Versions -+ -+Pre-releases, for those not familiar with them, are used for software releases -+prior to stable or generally available releases. Examples of prereleases include -+development, alpha, beta, and release candidate releases. A prerelease may be -+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -+order of precedence, prereleases come before their associated releases. In this -+example `1.2.3-beta.1 < 1.2.3`. -+ -+According to the Semantic Version specification prereleases may not be -+API compliant with their release counterpart. It says, -+ -+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. -+ -+SemVer comparisons using constraints without a prerelease comparator will skip -+prerelease versions. For example, `>=1.2.3` will skip prereleases when looking -+at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. -+ -+The reason for the `0` as a pre-release version in the example comparison is -+because pre-releases can only contain ASCII alphanumerics and hyphens (along with -+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the -+spec. The lowest character is a `0` in ASCII sort order -+(see an [ASCII Table](http://www.asciitable.com/)) -+ -+Understanding ASCII sort ordering is important because A-Z comes before a-z. That -+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case -+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what -+the spec specifies. -+ -+### Hyphen Range Comparisons -+ -+There are multiple methods to handle ranges and the first is hyphens ranges. -+These look like: -+ -+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` -+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` -+ -+### Wildcards In Comparisons -+ -+The `x`, `X`, and `*` characters can be used as a wildcard character. This works -+for all comparison operators. When used on the `=` operator it falls -+back to the patch level comparison (see tilde below). For example, -+ -+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -+* `>= 1.2.x` is equivalent to `>= 1.2.0` -+* `<= 2.x` is equivalent to `< 3` -+* `*` is equivalent to `>= 0.0.0` -+ -+### Tilde Range Comparisons (Patch) -+ -+The tilde (`~`) comparison operator is for patch level ranges when a minor -+version is specified and major level changes when the minor number is missing. -+For example, -+ -+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` -+* `~1` is equivalent to `>= 1, < 2` -+* `~2.3` is equivalent to `>= 2.3, < 2.4` -+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -+* `~1.x` is equivalent to `>= 1, < 2` -+ -+### Caret Range Comparisons (Major) -+ -+The caret (`^`) comparison operator is for major level changes once a stable -+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts -+as the API stability level. This is useful when comparisons of API versions as a -+major change is API breaking. 
For example, -+ -+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -+* `^2.3` is equivalent to `>= 2.3, < 3` -+* `^2.x` is equivalent to `>= 2.0.0, < 3` -+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` -+* `^0.2` is equivalent to `>=0.2.0 <0.3.0` -+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` -+* `^0.0` is equivalent to `>=0.0.0 <0.1.0` -+* `^0` is equivalent to `>=0.0.0 <1.0.0` -+ -+## Validation -+ -+In addition to testing a version against a constraint, a version can be validated -+against a constraint. When validation fails a slice of errors containing why a -+version didn't meet the constraint is returned. For example, -+ -+```go -+c, err := semver.NewConstraint(""<= 1.2.3, >= 1.4"") -+if err != nil { -+ // Handle constraint not being parseable. -+} -+ -+v, err := semver.NewVersion(""1.3"") -+if err != nil { -+ // Handle version not being parseable. -+} -+ -+// Validate a version against a constraint. -+a, msgs := c.Validate(v) -+// a is false -+for _, m := range msgs { -+ fmt.Println(m) -+ -+ // Loops over the errors which would read -+ // ""1.3 is greater than 1.2.3"" -+ // ""1.3 is less than 1.4"" -+} -+``` -+ -+## Contribute -+ -+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) -+or [create a pull request](https://github.com/Masterminds/semver/pulls). -diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go -new file mode 100644 -index 0000000000000..a78235895fdcf ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/collection.go -@@ -0,0 +1,24 @@ -+package semver -+ -+// Collection is a collection of Version instances and implements the sort -+// interface. See the sort package for more details. -+// https://golang.org/pkg/sort/ -+type Collection []*Version -+ -+// Len returns the length of a collection. The number of Version instances -+// on the slice. -+func (c Collection) Len() int { -+ return len(c) -+} -+ -+// Less is needed for the sort interface to compare two Version objects on the -+// slice. If checks if one is less than the other. -+func (c Collection) Less(i, j int) bool { -+ return c[i].LessThan(c[j]) -+} -+ -+// Swap is needed for the sort interface to replace the Version objects -+// at two different positions in the slice. -+func (c Collection) Swap(i, j int) { -+ c[i], c[j] = c[j], c[i] -+} -diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go -new file mode 100644 -index 0000000000000..547613f044f21 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go -@@ -0,0 +1,568 @@ -+package semver -+ -+import ( -+ ""bytes"" -+ ""errors"" -+ ""fmt"" -+ ""regexp"" -+ ""strings"" -+) -+ -+// Constraints is one or more constraint that a semantic version can be -+// checked against. -+type Constraints struct { -+ constraints [][]*constraint -+} -+ -+// NewConstraint returns a Constraints instance that a Version instance can -+// be checked against. If there is a parse error it will be returned. -+func NewConstraint(c string) (*Constraints, error) { -+ -+ // Rewrite - ranges into a comparison operation. 
-+ c = rewriteRange(c) -+ -+ ors := strings.Split(c, ""||"") -+ or := make([][]*constraint, len(ors)) -+ for k, v := range ors { -+ -+ // TODO: Find a way to validate and fetch all the constraints in a simpler form -+ -+ // Validate the segment -+ if !validConstraintRegex.MatchString(v) { -+ return nil, fmt.Errorf(""improper constraint: %s"", v) -+ } -+ -+ cs := findConstraintRegex.FindAllString(v, -1) -+ if cs == nil { -+ cs = append(cs, v) -+ } -+ result := make([]*constraint, len(cs)) -+ for i, s := range cs { -+ pc, err := parseConstraint(s) -+ if err != nil { -+ return nil, err -+ } -+ -+ result[i] = pc -+ } -+ or[k] = result -+ } -+ -+ o := &Constraints{constraints: or} -+ return o, nil -+} -+ -+// Check tests if a version satisfies the constraints. -+func (cs Constraints) Check(v *Version) bool { -+ // TODO(mattfarina): For v4 of this library consolidate the Check and Validate -+ // functions as the underlying functions make that possible now. -+ // loop over the ORs and check the inner ANDs -+ for _, o := range cs.constraints { -+ joy := true -+ for _, c := range o { -+ if check, _ := c.check(v); !check { -+ joy = false -+ break -+ } -+ } -+ -+ if joy { -+ return true -+ } -+ } -+ -+ return false -+} -+ -+// Validate checks if a version satisfies a constraint. If not a slice of -+// reasons for the failure are returned in addition to a bool. -+func (cs Constraints) Validate(v *Version) (bool, []error) { -+ // loop over the ORs and check the inner ANDs -+ var e []error -+ -+ // Capture the prerelease message only once. When it happens the first time -+ // this var is marked -+ var prerelesase bool -+ for _, o := range cs.constraints { -+ joy := true -+ for _, c := range o { -+ // Before running the check handle the case there the version is -+ // a prerelease and the check is not searching for prereleases. 
-+ if c.con.pre == """" && v.pre != """" { -+ if !prerelesase { -+ em := fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ e = append(e, em) -+ prerelesase = true -+ } -+ joy = false -+ -+ } else { -+ -+ if _, err := c.check(v); err != nil { -+ e = append(e, err) -+ joy = false -+ } -+ } -+ } -+ -+ if joy { -+ return true, []error{} -+ } -+ } -+ -+ return false, e -+} -+ -+func (cs Constraints) String() string { -+ buf := make([]string, len(cs.constraints)) -+ var tmp bytes.Buffer -+ -+ for k, v := range cs.constraints { -+ tmp.Reset() -+ vlen := len(v) -+ for kk, c := range v { -+ tmp.WriteString(c.string()) -+ -+ // Space separate the AND conditions -+ if vlen > 1 && kk < vlen-1 { -+ tmp.WriteString("" "") -+ } -+ } -+ buf[k] = tmp.String() -+ } -+ -+ return strings.Join(buf, "" || "") -+} -+ -+var constraintOps map[string]cfunc -+var constraintRegex *regexp.Regexp -+var constraintRangeRegex *regexp.Regexp -+ -+// Used to find individual constraints within a multi-constraint string -+var findConstraintRegex *regexp.Regexp -+ -+// Used to validate an segment of ANDs is valid -+var validConstraintRegex *regexp.Regexp -+ -+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + -+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + -+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` -+ -+func init() { -+ constraintOps = map[string]cfunc{ -+ """": constraintTildeOrEqual, -+ ""="": constraintTildeOrEqual, -+ ""!="": constraintNotEqual, -+ "">"": constraintGreaterThan, -+ ""<"": constraintLessThan, -+ "">="": constraintGreaterThanEqual, -+ ""=>"": constraintGreaterThanEqual, -+ ""<="": constraintLessThanEqual, -+ ""=<"": constraintLessThanEqual, -+ ""~"": constraintTilde, -+ ""~>"": constraintTilde, -+ ""^"": constraintCaret, -+ } -+ -+ ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` -+ -+ constraintRegex = regexp.MustCompile(fmt.Sprintf( -+ `^\s*(%s)\s*(%s)\s*$`, -+ ops, -+ cvRegex)) -+ -+ constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( -+ `\s*(%s)\s+-\s+(%s)\s*`, -+ cvRegex, cvRegex)) -+ -+ findConstraintRegex = regexp.MustCompile(fmt.Sprintf( -+ `(%s)\s*(%s)`, -+ ops, -+ cvRegex)) -+ -+ validConstraintRegex = regexp.MustCompile(fmt.Sprintf( -+ `^(\s*(%s)\s*(%s)\s*\,?)+$`, -+ ops, -+ cvRegex)) -+} -+ -+// An individual constraint -+type constraint struct { -+ // The version used in the constraint check. For example, if a constraint -+ // is '<= 2.0.0' the con a version instance representing 2.0.0. 
-+ con *Version -+ -+ // The original parsed version (e.g., 4.x from != 4.x) -+ orig string -+ -+ // The original operator for the constraint -+ origfunc string -+ -+ // When an x is used as part of the version (e.g., 1.x) -+ minorDirty bool -+ dirty bool -+ patchDirty bool -+} -+ -+// Check if a version meets the constraint -+func (c *constraint) check(v *Version) (bool, error) { -+ return constraintOps[c.origfunc](v, c) -+} -+ -+// String prints an individual constraint into a string -+func (c *constraint) string() string { -+ return c.origfunc + c.orig -+} -+ -+type cfunc func(v *Version, c *constraint) (bool, error) -+ -+func parseConstraint(c string) (*constraint, error) { -+ if len(c) > 0 { -+ m := constraintRegex.FindStringSubmatch(c) -+ if m == nil { -+ return nil, fmt.Errorf(""improper constraint: %s"", c) -+ } -+ -+ cs := &constraint{ -+ orig: m[2], -+ origfunc: m[1], -+ } -+ -+ ver := m[2] -+ minorDirty := false -+ patchDirty := false -+ dirty := false -+ if isX(m[3]) || m[3] == """" { -+ ver = ""0.0.0"" -+ dirty = true -+ } else if isX(strings.TrimPrefix(m[4], ""."")) || m[4] == """" { -+ minorDirty = true -+ dirty = true -+ ver = fmt.Sprintf(""%s.0.0%s"", m[3], m[6]) -+ } else if isX(strings.TrimPrefix(m[5], ""."")) || m[5] == """" { -+ dirty = true -+ patchDirty = true -+ ver = fmt.Sprintf(""%s%s.0%s"", m[3], m[4], m[6]) -+ } -+ -+ con, err := NewVersion(ver) -+ if err != nil { -+ -+ // The constraintRegex should catch any regex parsing errors. So, -+ // we should never get here. -+ return nil, errors.New(""constraint Parser Error"") -+ } -+ -+ cs.con = con -+ cs.minorDirty = minorDirty -+ cs.patchDirty = patchDirty -+ cs.dirty = dirty -+ -+ return cs, nil -+ } -+ -+ // The rest is the special case where an empty string was passed in which -+ // is equivalent to * or >=0.0.0 -+ con, err := StrictNewVersion(""0.0.0"") -+ if err != nil { -+ -+ // The constraintRegex should catch any regex parsing errors. So, -+ // we should never get here. -+ return nil, errors.New(""constraint Parser Error"") -+ } -+ -+ cs := &constraint{ -+ con: con, -+ orig: c, -+ origfunc: """", -+ minorDirty: false, -+ patchDirty: false, -+ dirty: true, -+ } -+ return cs, nil -+} -+ -+// Constraint functions -+func constraintNotEqual(v *Version, c *constraint) (bool, error) { -+ if c.dirty { -+ -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. 
-+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ if c.con.Major() != v.Major() { -+ return true, nil -+ } -+ if c.con.Minor() != v.Minor() && !c.minorDirty { -+ return true, nil -+ } else if c.minorDirty { -+ return false, fmt.Errorf(""%s is equal to %s"", v, c.orig) -+ } else if c.con.Patch() != v.Patch() && !c.patchDirty { -+ return true, nil -+ } else if c.patchDirty { -+ // Need to handle prereleases if present -+ if v.Prerelease() != """" || c.con.Prerelease() != """" { -+ eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s is equal to %s"", v, c.orig) -+ } -+ return false, fmt.Errorf(""%s is equal to %s"", v, c.orig) -+ } -+ } -+ -+ eq := v.Equal(c.con) -+ if eq { -+ return false, fmt.Errorf(""%s is equal to %s"", v, c.orig) -+ } -+ -+ return true, nil -+} -+ -+func constraintGreaterThan(v *Version, c *constraint) (bool, error) { -+ -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. -+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ var eq bool -+ -+ if !c.dirty { -+ eq = v.Compare(c.con) == 1 -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s is less than or equal to %s"", v, c.orig) -+ } -+ -+ if v.Major() > c.con.Major() { -+ return true, nil -+ } else if v.Major() < c.con.Major() { -+ return false, fmt.Errorf(""%s is less than or equal to %s"", v, c.orig) -+ } else if c.minorDirty { -+ // This is a range case such as >11. When the version is something like -+ // 11.1.0 is it not > 11. For that we would need 12 or higher -+ return false, fmt.Errorf(""%s is less than or equal to %s"", v, c.orig) -+ } else if c.patchDirty { -+ // This is for ranges such as >11.1. A version of 11.1.1 is not greater -+ // which one of 11.2.1 is greater -+ eq = v.Minor() > c.con.Minor() -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s is less than or equal to %s"", v, c.orig) -+ } -+ -+ // If we have gotten here we are not comparing pre-preleases and can use the -+ // Compare function to accomplish that. -+ eq = v.Compare(c.con) == 1 -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s is less than or equal to %s"", v, c.orig) -+} -+ -+func constraintLessThan(v *Version, c *constraint) (bool, error) { -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. -+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ eq := v.Compare(c.con) < 0 -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s is greater than or equal to %s"", v, c.orig) -+} -+ -+func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { -+ -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. 
-+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ eq := v.Compare(c.con) >= 0 -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s is less than %s"", v, c.orig) -+} -+ -+func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. -+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ var eq bool -+ -+ if !c.dirty { -+ eq = v.Compare(c.con) <= 0 -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s is greater than %s"", v, c.orig) -+ } -+ -+ if v.Major() > c.con.Major() { -+ return false, fmt.Errorf(""%s is greater than %s"", v, c.orig) -+ } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { -+ return false, fmt.Errorf(""%s is greater than %s"", v, c.orig) -+ } -+ -+ return true, nil -+} -+ -+// ~*, ~>* --> >= 0.0.0 (any) -+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 -+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 -+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 -+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 -+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -+func constraintTilde(v *Version, c *constraint) (bool, error) { -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. -+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ if v.LessThan(c.con) { -+ return false, fmt.Errorf(""%s is less than %s"", v, c.orig) -+ } -+ -+ // ~0.0.0 is a special case where all constraints are accepted. It's -+ // equivalent to >= 0.0.0. -+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && -+ !c.minorDirty && !c.patchDirty { -+ return true, nil -+ } -+ -+ if v.Major() != c.con.Major() { -+ return false, fmt.Errorf(""%s does not have same major version as %s"", v, c.orig) -+ } -+ -+ if v.Minor() != c.con.Minor() && !c.minorDirty { -+ return false, fmt.Errorf(""%s does not have same major and minor version as %s"", v, c.orig) -+ } -+ -+ return true, nil -+} -+ -+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise -+// it's a straight = -+func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. 
-+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ if c.dirty { -+ return constraintTilde(v, c) -+ } -+ -+ eq := v.Equal(c.con) -+ if eq { -+ return true, nil -+ } -+ -+ return false, fmt.Errorf(""%s is not equal to %s"", v, c.orig) -+} -+ -+// ^* --> (any) -+// ^1.2.3 --> >=1.2.3 <2.0.0 -+// ^1.2 --> >=1.2.0 <2.0.0 -+// ^1 --> >=1.0.0 <2.0.0 -+// ^0.2.3 --> >=0.2.3 <0.3.0 -+// ^0.2 --> >=0.2.0 <0.3.0 -+// ^0.0.3 --> >=0.0.3 <0.0.4 -+// ^0.0 --> >=0.0.0 <0.1.0 -+// ^0 --> >=0.0.0 <1.0.0 -+func constraintCaret(v *Version, c *constraint) (bool, error) { -+ // If there is a pre-release on the version but the constraint isn't looking -+ // for them assume that pre-releases are not compatible. See issue 21 for -+ // more details. -+ if v.Prerelease() != """" && c.con.Prerelease() == """" { -+ return false, fmt.Errorf(""%s is a prerelease version and the constraint is only looking for release versions"", v) -+ } -+ -+ // This less than handles prereleases -+ if v.LessThan(c.con) { -+ return false, fmt.Errorf(""%s is less than %s"", v, c.orig) -+ } -+ -+ var eq bool -+ -+ // ^ when the major > 0 is >=x.y.z < x+1 -+ if c.con.Major() > 0 || c.minorDirty { -+ -+ // ^ has to be within a major range for > 0. Everything less than was -+ // filtered out with the LessThan call above. This filters out those -+ // that greater but not within the same major range. -+ eq = v.Major() == c.con.Major() -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s does not have same major version as %s"", v, c.orig) -+ } -+ -+ // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 -+ if c.con.Major() == 0 && v.Major() > 0 { -+ return false, fmt.Errorf(""%s does not have same major version as %s"", v, c.orig) -+ } -+ // If the con Minor is > 0 it is not dirty -+ if c.con.Minor() > 0 || c.patchDirty { -+ eq = v.Minor() == c.con.Minor() -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0"", v, c.orig) -+ } -+ -+ // At this point the major is 0 and the minor is 0 and not dirty. The patch -+ // is not dirty so we need to check if they are equal. If they are not equal -+ eq = c.con.Patch() == v.Patch() -+ if eq { -+ return true, nil -+ } -+ return false, fmt.Errorf(""%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0"", v, c.orig) -+} -+ -+func isX(x string) bool { -+ switch x { -+ case ""x"", ""*"", ""X"": -+ return true -+ default: -+ return false -+ } -+} -+ -+func rewriteRange(i string) string { -+ m := constraintRangeRegex.FindAllStringSubmatch(i, -1) -+ if m == nil { -+ return i -+ } -+ o := i -+ for _, v := range m { -+ t := fmt.Sprintf("">= %s, <= %s"", v[1], v[11]) -+ o = strings.Replace(o, v[0], t, 1) -+ } -+ -+ return o -+} -diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go -new file mode 100644 -index 0000000000000..391aa46b76df8 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/doc.go -@@ -0,0 +1,184 @@ -+/* -+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. 
-+ -+Specifically it provides the ability to: -+ -+ * Parse semantic versions -+ * Sort semantic versions -+ * Check if a semantic version fits within a set of constraints -+ * Optionally work with a `v` prefix -+ -+Parsing Semantic Versions -+ -+There are two functions that can parse semantic versions. The `StrictNewVersion` -+function only parses valid version 2 semantic versions as outlined in the -+specification. The `NewVersion` function attempts to coerce a version into a -+semantic version and parse it. For example, if there is a leading v or a version -+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid -+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned -+that can be sorted, compared, and used in constraints. -+ -+When parsing a version an optional error can be returned if there is an issue -+parsing the version. For example, -+ -+ v, err := semver.NewVersion(""1.2.3-beta.1+b345"") -+ -+The version object has methods to get the parts of the version, compare it to -+other versions, convert the version back into a string, and get the original -+string. For more details please see the documentation -+at https://godoc.org/github.com/Masterminds/semver. -+ -+Sorting Semantic Versions -+ -+A set of versions can be sorted using the `sort` package from the standard library. -+For example, -+ -+ raw := []string{""1.2.3"", ""1.0"", ""1.3"", ""2"", ""0.4.2"",} -+ vs := make([]*semver.Version, len(raw)) -+ for i, r := range raw { -+ v, err := semver.NewVersion(r) -+ if err != nil { -+ t.Errorf(""Error parsing version: %s"", err) -+ } -+ -+ vs[i] = v -+ } -+ -+ sort.Sort(semver.Collection(vs)) -+ -+Checking Version Constraints and Comparing Versions -+ -+There are two methods for comparing versions. One uses comparison methods on -+`Version` instances and the other is using Constraints. There are some important -+differences to notes between these two methods of comparison. -+ -+1. When two versions are compared using functions such as `Compare`, `LessThan`, -+ and others it will follow the specification and always include prereleases -+ within the comparison. It will provide an answer valid with the comparison -+ spec section at https://semver.org/#spec-item-11 -+2. When constraint checking is used for checks or validation it will follow a -+ different set of rules that are common for ranges with tools like npm/js -+ and Rust/Cargo. This includes considering prereleases to be invalid if the -+ ranges does not include on. If you want to have it include pre-releases a -+ simple solution is to include `-0` in your range. -+3. Constraint ranges can have some complex rules including the shorthard use of -+ ~ and ^. For more details on those see the options below. -+ -+There are differences between the two methods or checking versions because the -+comparison methods on `Version` follow the specification while comparison ranges -+are not part of the specification. Different packages and tools have taken it -+upon themselves to come up with range rules. This has resulted in differences. -+For example, npm/js and Cargo/Rust follow similar patterns which PHP has a -+different pattern for ^. The comparison features in this package follow the -+npm/js and Cargo/Rust lead because applications using it have followed similar -+patters with their versions. -+ -+Checking a version against version constraints is one of the most featureful -+parts of the package. 
-+ -+ c, err := semver.NewConstraint("">= 1.2.3"") -+ if err != nil { -+ // Handle constraint not being parsable. -+ } -+ -+ v, err := semver.NewVersion(""1.3"") -+ if err != nil { -+ // Handle version not being parsable. -+ } -+ // Check if the version meets the constraints. The a variable will be true. -+ a := c.Check(v) -+ -+Basic Comparisons -+ -+There are two elements to the comparisons. First, a comparison string is a list -+of comma or space separated AND comparisons. These are then separated by || (OR) -+comparisons. For example, `"">= 1.2 < 3.0.0 || >= 4.2.3""` is looking for a -+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -+greater than or equal to 4.2.3. This can also be written as -+`"">= 1.2, < 3.0.0 || >= 4.2.3""` -+ -+The basic comparisons are: -+ -+ * `=`: equal (aliased to no operator) -+ * `!=`: not equal -+ * `>`: greater than -+ * `<`: less than -+ * `>=`: greater than or equal to -+ * `<=`: less than or equal to -+ -+Hyphen Range Comparisons -+ -+There are multiple methods to handle ranges and the first is hyphens ranges. -+These look like: -+ -+ * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` -+ * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` -+ -+Wildcards In Comparisons -+ -+The `x`, `X`, and `*` characters can be used as a wildcard character. This works -+for all comparison operators. When used on the `=` operator it falls -+back to the tilde operation. For example, -+ -+ * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` -+ * `>= 1.2.x` is equivalent to `>= 1.2.0` -+ * `<= 2.x` is equivalent to `<= 3` -+ * `*` is equivalent to `>= 0.0.0` -+ -+Tilde Range Comparisons (Patch) -+ -+The tilde (`~`) comparison operator is for patch level ranges when a minor -+version is specified and major level changes when the minor number is missing. -+For example, -+ -+ * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` -+ * `~1` is equivalent to `>= 1, < 2` -+ * `~2.3` is equivalent to `>= 2.3 < 2.4` -+ * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` -+ * `~1.x` is equivalent to `>= 1 < 2` -+ -+Caret Range Comparisons (Major) -+ -+The caret (`^`) comparison operator is for major level changes once a stable -+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts -+as the API stability level. This is useful when comparisons of API versions as a -+major change is API breaking. For example, -+ -+ * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -+ * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -+ * `^2.3` is equivalent to `>= 2.3, < 3` -+ * `^2.x` is equivalent to `>= 2.0.0, < 3` -+ * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` -+ * `^0.2` is equivalent to `>=0.2.0 <0.3.0` -+ * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` -+ * `^0.0` is equivalent to `>=0.0.0 <0.1.0` -+ * `^0` is equivalent to `>=0.0.0 <1.0.0` -+ -+Validation -+ -+In addition to testing a version against a constraint, a version can be validated -+against a constraint. When validation fails a slice of errors containing why a -+version didn't meet the constraint is returned. For example, -+ -+ c, err := semver.NewConstraint(""<= 1.2.3, >= 1.4"") -+ if err != nil { -+ // Handle constraint not being parseable. -+ } -+ -+ v, _ := semver.NewVersion(""1.3"") -+ if err != nil { -+ // Handle version not being parseable. -+ } -+ -+ // Validate a version against a constraint. 
-+ a, msgs := c.Validate(v) -+ // a is false -+ for _, m := range msgs { -+ fmt.Println(m) -+ -+ // Loops over the errors which would read -+ // ""1.3 is greater than 1.2.3"" -+ // ""1.3 is less than 1.4"" -+ } -+*/ -+package semver -diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go -new file mode 100644 -index 0000000000000..a242ad70587ce ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go -@@ -0,0 +1,22 @@ -+// +build gofuzz -+ -+package semver -+ -+func Fuzz(data []byte) int { -+ d := string(data) -+ -+ // Test NewVersion -+ _, _ = NewVersion(d) -+ -+ // Test StrictNewVersion -+ _, _ = StrictNewVersion(d) -+ -+ // Test NewConstraint -+ _, _ = NewConstraint(d) -+ -+ // The return value should be 0 normally, 1 if the priority in future tests -+ // should be increased, and -1 if future tests should skip passing in that -+ // data. We do not have a reason to change priority so 0 is always returned. -+ // There are example tests that do this. -+ return 0 -+} -diff --git a/vendor/github.com/Masterminds/semver/v3/go.mod b/vendor/github.com/Masterminds/semver/v3/go.mod -new file mode 100644 -index 0000000000000..658233c8f0123 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/go.mod -@@ -0,0 +1,3 @@ -+module github.com/Masterminds/semver/v3 -+ -+go 1.12 -diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go -new file mode 100644 -index 0000000000000..d6b9cda3eeb70 ---- /dev/null -+++ b/vendor/github.com/Masterminds/semver/v3/version.go -@@ -0,0 +1,606 @@ -+package semver -+ -+import ( -+ ""bytes"" -+ ""database/sql/driver"" -+ ""encoding/json"" -+ ""errors"" -+ ""fmt"" -+ ""regexp"" -+ ""strconv"" -+ ""strings"" -+) -+ -+// The compiled version of the regex created at init() is cached here so it -+// only needs to be created once. -+var versionRegex *regexp.Regexp -+ -+var ( -+ // ErrInvalidSemVer is returned a version is found to be invalid when -+ // being parsed. -+ ErrInvalidSemVer = errors.New(""Invalid Semantic Version"") -+ -+ // ErrEmptyString is returned when an empty string is passed in for parsing. -+ ErrEmptyString = errors.New(""Version string empty"") -+ -+ // ErrInvalidCharacters is returned when invalid characters are found as -+ // part of a version -+ ErrInvalidCharacters = errors.New(""Invalid characters in version"") -+ -+ // ErrSegmentStartsZero is returned when a version segment starts with 0. -+ // This is invalid in SemVer. -+ ErrSegmentStartsZero = errors.New(""Version segment starts with 0"") -+ -+ // ErrInvalidMetadata is returned when the metadata is an invalid format -+ ErrInvalidMetadata = errors.New(""Invalid Metadata string"") -+ -+ // ErrInvalidPrerelease is returned when the pre-release is an invalid format -+ ErrInvalidPrerelease = errors.New(""Invalid Prerelease string"") -+) -+ -+// semVerRegex is the regular expression used to parse a semantic version. -+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + -+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + -+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` -+ -+// Version represents a single semantic version. 
-+type Version struct { -+ major, minor, patch uint64 -+ pre string -+ metadata string -+ original string -+} -+ -+func init() { -+ versionRegex = regexp.MustCompile(""^"" + semVerRegex + ""$"") -+} -+ -+const num string = ""0123456789"" -+const allowed string = ""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"" + num -+ -+// StrictNewVersion parses a given version and returns an instance of Version or -+// an error if unable to parse the version. Only parses valid semantic versions. -+// Performs checking that can find errors within the version. -+// If you want to coerce a version, such as 1 or 1.2, and perse that as the 1.x -+// releases of semver provided use the NewSemver() function. -+func StrictNewVersion(v string) (*Version, error) { -+ // Parsing here does not use RegEx in order to increase performance and reduce -+ // allocations. -+ -+ if len(v) == 0 { -+ return nil, ErrEmptyString -+ } -+ -+ // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build -+ parts := strings.SplitN(v, ""."", 3) -+ if len(parts) != 3 { -+ return nil, ErrInvalidSemVer -+ } -+ -+ sv := &Version{ -+ original: v, -+ } -+ -+ // check for prerelease or build metadata -+ var extra []string -+ if strings.ContainsAny(parts[2], ""-+"") { -+ // Start with the build metadata first as it needs to be on the right -+ extra = strings.SplitN(parts[2], ""+"", 2) -+ if len(extra) > 1 { -+ // build metadata found -+ sv.metadata = extra[1] -+ parts[2] = extra[0] -+ } -+ -+ extra = strings.SplitN(parts[2], ""-"", 2) -+ if len(extra) > 1 { -+ // prerelease found -+ sv.pre = extra[1] -+ parts[2] = extra[0] -+ } -+ } -+ -+ // Validate the number segments are valid. This includes only having positive -+ // numbers and no leading 0's. -+ for _, p := range parts { -+ if !containsOnly(p, num) { -+ return nil, ErrInvalidCharacters -+ } -+ -+ if len(p) > 1 && p[0] == '0' { -+ return nil, ErrSegmentStartsZero -+ } -+ } -+ -+ // Extract the major, minor, and patch elements onto the returned Version -+ var err error -+ sv.major, err = strconv.ParseUint(parts[0], 10, 64) -+ if err != nil { -+ return nil, err -+ } -+ -+ sv.minor, err = strconv.ParseUint(parts[1], 10, 64) -+ if err != nil { -+ return nil, err -+ } -+ -+ sv.patch, err = strconv.ParseUint(parts[2], 10, 64) -+ if err != nil { -+ return nil, err -+ } -+ -+ // No prerelease or build metadata found so returning now as a fastpath. -+ if sv.pre == """" && sv.metadata == """" { -+ return sv, nil -+ } -+ -+ if sv.pre != """" { -+ if err = validatePrerelease(sv.pre); err != nil { -+ return nil, err -+ } -+ } -+ -+ if sv.metadata != """" { -+ if err = validateMetadata(sv.metadata); err != nil { -+ return nil, err -+ } -+ } -+ -+ return sv, nil -+} -+ -+// NewVersion parses a given version and returns an instance of Version or -+// an error if unable to parse the version. If the version is SemVer-ish it -+// attempts to convert it to SemVer. If you want to validate it was a strict -+// semantic version at parse time see StrictNewVersion(). 
-+func NewVersion(v string) (*Version, error) { -+ m := versionRegex.FindStringSubmatch(v) -+ if m == nil { -+ return nil, ErrInvalidSemVer -+ } -+ -+ sv := &Version{ -+ metadata: m[8], -+ pre: m[5], -+ original: v, -+ } -+ -+ var err error -+ sv.major, err = strconv.ParseUint(m[1], 10, 64) -+ if err != nil { -+ return nil, fmt.Errorf(""Error parsing version segment: %s"", err) -+ } -+ -+ if m[2] != """" { -+ sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "".""), 10, 64) -+ if err != nil { -+ return nil, fmt.Errorf(""Error parsing version segment: %s"", err) -+ } -+ } else { -+ sv.minor = 0 -+ } -+ -+ if m[3] != """" { -+ sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "".""), 10, 64) -+ if err != nil { -+ return nil, fmt.Errorf(""Error parsing version segment: %s"", err) -+ } -+ } else { -+ sv.patch = 0 -+ } -+ -+ // Perform some basic due diligence on the extra parts to ensure they are -+ // valid. -+ -+ if sv.pre != """" { -+ if err = validatePrerelease(sv.pre); err != nil { -+ return nil, err -+ } -+ } -+ -+ if sv.metadata != """" { -+ if err = validateMetadata(sv.metadata); err != nil { -+ return nil, err -+ } -+ } -+ -+ return sv, nil -+} -+ -+// MustParse parses a given version and panics on error. -+func MustParse(v string) *Version { -+ sv, err := NewVersion(v) -+ if err != nil { -+ panic(err) -+ } -+ return sv -+} -+ -+// String converts a Version object to a string. -+// Note, if the original version contained a leading v this version will not. -+// See the Original() method to retrieve the original value. Semantic Versions -+// don't contain a leading v per the spec. Instead it's optional on -+// implementation. -+func (v Version) String() string { -+ var buf bytes.Buffer -+ -+ fmt.Fprintf(&buf, ""%d.%d.%d"", v.major, v.minor, v.patch) -+ if v.pre != """" { -+ fmt.Fprintf(&buf, ""-%s"", v.pre) -+ } -+ if v.metadata != """" { -+ fmt.Fprintf(&buf, ""+%s"", v.metadata) -+ } -+ -+ return buf.String() -+} -+ -+// Original returns the original value passed in to be parsed. -+func (v *Version) Original() string { -+ return v.original -+} -+ -+// Major returns the major version. -+func (v Version) Major() uint64 { -+ return v.major -+} -+ -+// Minor returns the minor version. -+func (v Version) Minor() uint64 { -+ return v.minor -+} -+ -+// Patch returns the patch version. -+func (v Version) Patch() uint64 { -+ return v.patch -+} -+ -+// Prerelease returns the pre-release version. -+func (v Version) Prerelease() string { -+ return v.pre -+} -+ -+// Metadata returns the metadata on the version. -+func (v Version) Metadata() string { -+ return v.metadata -+} -+ -+// originalVPrefix returns the original 'v' prefix if any. -+func (v Version) originalVPrefix() string { -+ -+ // Note, only lowercase v is supported as a prefix by the parser. -+ if v.original != """" && v.original[:1] == ""v"" { -+ return v.original[:1] -+ } -+ return """" -+} -+ -+// IncPatch produces the next patch version. -+// If the current version does not have prerelease/metadata information, -+// it unsets metadata and prerelease values, increments patch number. -+// If the current version has any of prerelease or metadata information, -+// it unsets both values and keeps current patch value -+func (v Version) IncPatch() Version { -+ vNext := v -+ // according to http://semver.org/#spec-item-9 -+ // Pre-release versions have a lower precedence than the associated normal version. 
-+ // according to http://semver.org/#spec-item-10 -+ // Build metadata SHOULD be ignored when determining version precedence. -+ if v.pre != """" { -+ vNext.metadata = """" -+ vNext.pre = """" -+ } else { -+ vNext.metadata = """" -+ vNext.pre = """" -+ vNext.patch = v.patch + 1 -+ } -+ vNext.original = v.originalVPrefix() + """" + vNext.String() -+ return vNext -+} -+ -+// IncMinor produces the next minor version. -+// Sets patch to 0. -+// Increments minor number. -+// Unsets metadata. -+// Unsets prerelease status. -+func (v Version) IncMinor() Version { -+ vNext := v -+ vNext.metadata = """" -+ vNext.pre = """" -+ vNext.patch = 0 -+ vNext.minor = v.minor + 1 -+ vNext.original = v.originalVPrefix() + """" + vNext.String() -+ return vNext -+} -+ -+// IncMajor produces the next major version. -+// Sets patch to 0. -+// Sets minor to 0. -+// Increments major number. -+// Unsets metadata. -+// Unsets prerelease status. -+func (v Version) IncMajor() Version { -+ vNext := v -+ vNext.metadata = """" -+ vNext.pre = """" -+ vNext.patch = 0 -+ vNext.minor = 0 -+ vNext.major = v.major + 1 -+ vNext.original = v.originalVPrefix() + """" + vNext.String() -+ return vNext -+} -+ -+// SetPrerelease defines the prerelease value. -+// Value must not include the required 'hyphen' prefix. -+func (v Version) SetPrerelease(prerelease string) (Version, error) { -+ vNext := v -+ if len(prerelease) > 0 { -+ if err := validatePrerelease(prerelease); err != nil { -+ return vNext, err -+ } -+ } -+ vNext.pre = prerelease -+ vNext.original = v.originalVPrefix() + """" + vNext.String() -+ return vNext, nil -+} -+ -+// SetMetadata defines metadata value. -+// Value must not include the required 'plus' prefix. -+func (v Version) SetMetadata(metadata string) (Version, error) { -+ vNext := v -+ if len(metadata) > 0 { -+ if err := validateMetadata(metadata); err != nil { -+ return vNext, err -+ } -+ } -+ vNext.metadata = metadata -+ vNext.original = v.originalVPrefix() + """" + vNext.String() -+ return vNext, nil -+} -+ -+// LessThan tests if one version is less than another one. -+func (v *Version) LessThan(o *Version) bool { -+ return v.Compare(o) < 0 -+} -+ -+// GreaterThan tests if one version is greater than another one. -+func (v *Version) GreaterThan(o *Version) bool { -+ return v.Compare(o) > 0 -+} -+ -+// Equal tests if two versions are equal to each other. -+// Note, versions can be equal with different metadata since metadata -+// is not considered part of the comparable version. -+func (v *Version) Equal(o *Version) bool { -+ return v.Compare(o) == 0 -+} -+ -+// Compare compares this version to another one. It returns -1, 0, or 1 if -+// the version smaller, equal, or larger than the other version. -+// -+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is -+// lower than the version without a prerelease. Compare always takes into account -+// prereleases. If you want to work with ranges using typical range syntaxes that -+// skip prereleases if the range is not looking for them use constraints. -+func (v *Version) Compare(o *Version) int { -+ // Compare the major, minor, and patch version for differences. If a -+ // difference is found return the comparison. -+ if d := compareSegment(v.Major(), o.Major()); d != 0 { -+ return d -+ } -+ if d := compareSegment(v.Minor(), o.Minor()); d != 0 { -+ return d -+ } -+ if d := compareSegment(v.Patch(), o.Patch()); d != 0 { -+ return d -+ } -+ -+ // At this point the major, minor, and patch versions are the same. 
-+ ps := v.pre -+ po := o.Prerelease() -+ -+ if ps == """" && po == """" { -+ return 0 -+ } -+ if ps == """" { -+ return 1 -+ } -+ if po == """" { -+ return -1 -+ } -+ -+ return comparePrerelease(ps, po) -+} -+ -+// UnmarshalJSON implements JSON.Unmarshaler interface. -+func (v *Version) UnmarshalJSON(b []byte) error { -+ var s string -+ if err := json.Unmarshal(b, &s); err != nil { -+ return err -+ } -+ temp, err := NewVersion(s) -+ if err != nil { -+ return err -+ } -+ v.major = temp.major -+ v.minor = temp.minor -+ v.patch = temp.patch -+ v.pre = temp.pre -+ v.metadata = temp.metadata -+ v.original = temp.original -+ return nil -+} -+ -+// MarshalJSON implements JSON.Marshaler interface. -+func (v Version) MarshalJSON() ([]byte, error) { -+ return json.Marshal(v.String()) -+} -+ -+// Scan implements the SQL.Scanner interface. -+func (v *Version) Scan(value interface{}) error { -+ var s string -+ s, _ = value.(string) -+ temp, err := NewVersion(s) -+ if err != nil { -+ return err -+ } -+ v.major = temp.major -+ v.minor = temp.minor -+ v.patch = temp.patch -+ v.pre = temp.pre -+ v.metadata = temp.metadata -+ v.original = temp.original -+ return nil -+} -+ -+// Value implements the Driver.Valuer interface. -+func (v Version) Value() (driver.Value, error) { -+ return v.String(), nil -+} -+ -+func compareSegment(v, o uint64) int { -+ if v < o { -+ return -1 -+ } -+ if v > o { -+ return 1 -+ } -+ -+ return 0 -+} -+ -+func comparePrerelease(v, o string) int { -+ -+ // split the prelease versions by their part. The separator, per the spec, -+ // is a . -+ sparts := strings.Split(v, ""."") -+ oparts := strings.Split(o, ""."") -+ -+ // Find the longer length of the parts to know how many loop iterations to -+ // go through. -+ slen := len(sparts) -+ olen := len(oparts) -+ -+ l := slen -+ if olen > slen { -+ l = olen -+ } -+ -+ // Iterate over each part of the prereleases to compare the differences. -+ for i := 0; i < l; i++ { -+ // Since the lentgh of the parts can be different we need to create -+ // a placeholder. This is to avoid out of bounds issues. -+ stemp := """" -+ if i < slen { -+ stemp = sparts[i] -+ } -+ -+ otemp := """" -+ if i < olen { -+ otemp = oparts[i] -+ } -+ -+ d := comparePrePart(stemp, otemp) -+ if d != 0 { -+ return d -+ } -+ } -+ -+ // Reaching here means two versions are of equal value but have different -+ // metadata (the part following a +). They are not identical in string form -+ // but the version comparison finds them to be equal. -+ return 0 -+} -+ -+func comparePrePart(s, o string) int { -+ // Fastpath if they are equal -+ if s == o { -+ return 0 -+ } -+ -+ // When s or o are empty we can use the other in an attempt to determine -+ // the response. -+ if s == """" { -+ if o != """" { -+ return -1 -+ } -+ return 1 -+ } -+ -+ if o == """" { -+ if s != """" { -+ return 1 -+ } -+ return -1 -+ } -+ -+ // When comparing strings ""99"" is greater than ""103"". To handle -+ // cases like this we need to detect numbers and compare them. According -+ // to the semver spec, numbers are always positive. If there is a - at the -+ // start like -99 this is to be evaluated as an alphanum. numbers always -+ // have precedence over alphanum. Parsing as Uints because negative numbers -+ // are ignored. 
-+ -+ oi, n1 := strconv.ParseUint(o, 10, 64) -+ si, n2 := strconv.ParseUint(s, 10, 64) -+ -+ // The case where both are strings compare the strings -+ if n1 != nil && n2 != nil { -+ if s > o { -+ return 1 -+ } -+ return -1 -+ } else if n1 != nil { -+ // o is a string and s is a number -+ return -1 -+ } else if n2 != nil { -+ // s is a string and o is a number -+ return 1 -+ } -+ // Both are numbers -+ if si > oi { -+ return 1 -+ } -+ return -1 -+ -+} -+ -+// Like strings.ContainsAny but does an only instead of any. -+func containsOnly(s string, comp string) bool { -+ return strings.IndexFunc(s, func(r rune) bool { -+ return !strings.ContainsRune(comp, r) -+ }) == -1 -+} -+ -+// From the spec, ""Identifiers MUST comprise only -+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. -+// Numeric identifiers MUST NOT include leading zeroes."". These segments can -+// be dot separated. -+func validatePrerelease(p string) error { -+ eparts := strings.Split(p, ""."") -+ for _, p := range eparts { -+ if containsOnly(p, num) { -+ if len(p) > 1 && p[0] == '0' { -+ return ErrSegmentStartsZero -+ } -+ } else if !containsOnly(p, allowed) { -+ return ErrInvalidPrerelease -+ } -+ } -+ -+ return nil -+} -+ -+// From the spec, ""Build metadata MAY be denoted by -+// appending a plus sign and a series of dot separated identifiers immediately -+// following the patch or pre-release version. Identifiers MUST comprise only -+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."" -+func validateMetadata(m string) error { -+ eparts := strings.Split(m, ""."") -+ for _, p := range eparts { -+ if !containsOnly(p, allowed) { -+ return ErrInvalidMetadata -+ } -+ } -+ return nil -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore -new file mode 100644 -index 0000000000000..5e3002f88f514 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore -@@ -0,0 +1,2 @@ -+vendor/ -+/.glide -diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md -new file mode 100644 -index 0000000000000..fcdd4e88aed41 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md -@@ -0,0 +1,370 @@ -+# Changelog -+ -+## Release 3.2.1 (2021-02-04) -+ -+### Changed -+ -+- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) -+ -+## Release 3.2.0 (2020-12-14) -+ -+### Added -+ -+- #211: Added randInt function (thanks @kochurovro) -+- #223: Added fromJson and mustFromJson functions (thanks @mholt) -+- #242: Added a bcrypt function (thanks @robbiet480) -+- #253: Added randBytes function (thanks @MikaelSmith) -+- #254: Added dig function for dicts (thanks @nyarly) -+- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) -+- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) -+- #268: Added and and all functions for testing conditions (thanks @phuslu) -+- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf -+ (thanks @andrewmostello) -+- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) -+- #270: Extend certificate functions to handle non-RSA keys + add support for -+ ed25519 keys (thanks @misberner) -+ -+### Changed -+ -+- Removed testing and support for Go 1.12. 
ed25519 support requires Go 1.13 or newer -+- Using semver 3.1.1 and mergo 0.3.11 -+ -+### Fixed -+ -+- #249: Fix htmlDateInZone example (thanks @spawnia) -+ -+NOTE: The dependency github.com/imdario/mergo reverted the breaking change in -+0.3.9 via 0.3.10 release. -+ -+## Release 3.1.0 (2020-04-16) -+ -+NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 -+that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. -+ -+### Added -+ -+- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) -+- #224: Added duration filter (thanks @frebib) -+- #205: Added `seq` function (thanks @thadc23) -+ -+### Changed -+ -+- #203: Unlambda functions with correct signature (thanks @muesli) -+- #236: Updated the license formatting for GitHub display purposes -+- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 -+ as it causes a breaking change for sprig. That issue is tracked at -+ https://github.com/imdario/mergo/issues/139 -+ -+### Fixed -+ -+- #229: Fix `seq` example in docs (thanks @kalmant) -+ -+## Release 3.0.2 (2019-12-13) -+ -+### Fixed -+ -+- #220: Updating to semver v3.0.3 to fix issue with <= ranges -+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) -+ -+## Release 3.0.1 (2019-12-08) -+ -+### Fixed -+ -+- #212: Updated semver fixing broken constraint checking with ^0.0 -+ -+## Release 3.0.0 (2019-10-02) -+ -+### Added -+ -+- #187: Added durationRound function (thanks @yjp20) -+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) -+- #193: Added toRawJson support (thanks @Dean-Coakley) -+- #197: Added get support to dicts (thanks @Dean-Coakley) -+ -+### Changed -+ -+- #186: Moving dependency management to Go modules -+- #186: Updated semver to v3. This has changes in the way ^ is handled -+- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy -+- #196: trunc now supports negative values (thanks @Dean-Coakley) -+ -+## Release 2.22.0 (2019-10-02) -+ -+### Added -+ -+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) -+- #195: Added deepCopy function for use with dicts -+ -+### Changed -+ -+- Updated merge and mergeOverwrite documentation to explain copying and how to -+ use deepCopy with it -+ -+## Release 2.21.0 (2019-09-18) -+ -+### Added -+ -+- #122: Added encryptAES/decryptAES functions (thanks @n0madic) -+- #128: Added toDecimal support (thanks @Dean-Coakley) -+- #169: Added list contcat (thanks @astorath) -+- #174: Added deepEqual function (thanks @bonifaido) -+- #170: Added url parse and join functions (thanks @astorath) -+ -+### Changed -+ -+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify -+ -+### Fixed -+ -+- #172: Fix semver wildcard example (thanks @piepmatz) -+- #175: Fix dateInZone doc example (thanks @s3than) -+ -+## Release 2.20.0 (2019-06-18) -+ -+### Added -+ -+- #164: Adding function to get unix epoch for a time (@mattfarina) -+- #166: Adding tests for date_in_zone (@mattfarina) -+ -+### Changed -+ -+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) -+- #150: Handles pointer type for time.Time in ""htmlDate"" (@mapreal19) -+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) -+ -+### Fixed -+ -+## Release 2.19.0 (2019-03-02) -+ -+IMPORTANT: This release reverts a change from 2.18.0 -+ -+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. -+ -+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. -+ -+### Changed -+ -+- Fix substr panic 35fb796 (Alexey igrychev) -+- Remove extra period 1eb7729 (Matthew Lorimor) -+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) -+- README edits/fixes/suggestions 08fe136 (Lauri Apple) -+ -+ -+## Release 2.18.0 (2019-02-12) -+ -+### Added -+ -+- Added mergeOverwrite function -+- cryptographic functions that use secure random (see fe1de12) -+ -+### Changed -+ -+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) -+- Handle has for nil list 9c10885 (Daniel Cohen) -+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) -+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) -+- Replace outdated goutils imports 01893d2 (Matthew Lorimor) -+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) -+- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) -+ -+### Fixed -+ -+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) -+- Fix substr var names and comments d581f80 (Dean Coakley) -+- Fix substr documentation 2737203 (Dean Coakley) -+ -+## Release 2.17.1 (2019-01-03) -+ -+### Fixed -+ -+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. 
This adds the correct version string to glide.yaml. -+ -+## Release 2.17.0 (2019-01-03) -+ -+### Added -+ -+- adds alder32sum function and test 6908fc2 (marshallford) -+- Added kebabcase function ca331a1 (Ilyes512) -+ -+### Changed -+ -+- Update goutils to 1.1.0 4e1125d (Matt Butcher) -+ -+### Fixed -+ -+- Fix 'has' documentation e3f2a85 (dean-coakley) -+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) -+- fixes spelling errors... not sure how that happened 4cf188a (marshallford) -+ -+## Release 2.16.0 (2018-08-13) -+ -+### Added -+ -+- add splitn function fccb0b0 (Helgi Þorbjörnsson) -+- Add slice func df28ca7 (gongdo) -+- Generate serial number a3bdffd (Cody Coons) -+- Extract values of dict with values function df39312 (Lawrence Jones) -+ -+### Changed -+ -+- Modify panic message for list.slice ae38335 (gongdo) -+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) -+- Remove duplicated documentation 1d97af1 (Matthew Fisher) -+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) -+ -+### Fixed -+ -+- Fix file permissions c5f40b5 (gongdo) -+- Fix example for buildCustomCert 7779e0d (Tin Lam) -+ -+## Release 2.15.0 (2018-04-02) -+ -+### Added -+ -+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) -+- #66: Add ternary function (thanks @binoculars) -+- #67: Allow keys function to take multiple dicts (thanks @binoculars) -+- #89: Added sha1sum to crypto function (thanks @benkeil) -+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) -+- #92: Add travis testing for go 1.10 -+- #93: Adding appveyor config for windows testing -+ -+### Changed -+ -+- #90: Updating to more recent dependencies -+- #73: replace satori/go.uuid with google/uuid (thanks @petterw) -+ -+### Fixed -+ -+- #76: Fixed documentation typos (thanks @Thiht) -+- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older -+ -+## Release 2.14.1 (2017-12-01) -+ -+### Fixed -+ -+- #60: Fix typo in function name documentation (thanks @neil-ca-moore) -+- #61: Removing line with {{ due to blocking github pages genertion -+- #64: Update the list functions to handle int, string, and other slices for compatibility -+ -+## Release 2.14.0 (2017-10-06) -+ -+This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
-+ -+- `genCA` generates an SSL Certificate Authority -+- `genSelfSignedCert` generates an SSL self-signed certificate -+- `genSignedCert` generates an SSL certificate and key based on a given CA -+ -+## Release 2.13.0 (2017-09-18) -+ -+This release adds new functions, including: -+ -+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions -+- `floor`, `ceil`, and `round` math functions -+- `toDate` converts a string to a date -+- `nindent` is just like `indent` but also prepends a new line -+- `ago` returns the time from `time.Now` -+ -+### Added -+ -+- #40: Added basic regex functionality (thanks @alanquillin) -+- #41: Added ceil floor and round functions (thanks @alanquillin) -+- #48: Added toDate function (thanks @andreynering) -+- #50: Added nindent function (thanks @binoculars) -+- #46: Added ago function (thanks @slayer) -+ -+### Changed -+ -+- #51: Updated godocs to include new string functions (thanks @curtisallen) -+- #49: Added ability to merge multiple dicts (thanks @binoculars) -+ -+## Release 2.12.0 (2017-05-17) -+ -+- `snakecase`, `camelcase`, and `shuffle` are three new string functions -+- `fail` allows you to bail out of a template render when conditions are not met -+ -+## Release 2.11.0 (2017-05-02) -+ -+- Added `toJson` and `toPrettyJson` -+- Added `merge` -+- Refactored documentation -+ -+## Release 2.10.0 (2017-03-15) -+ -+- Added `semver` and `semverCompare` for Semantic Versions -+- `list` replaces `tuple` -+- Fixed issue with `join` -+- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` -+ -+## Release 2.9.0 (2017-02-23) -+ -+- Added `splitList` to split a list -+- Added crypto functions of `genPrivateKey` and `derivePassword` -+ -+## Release 2.8.0 (2016-12-21) -+ -+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) -+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) -+ -+## Release 2.7.0 (2016-12-01) -+ -+- Added `sha256sum` to generate a hash of an input -+- Added functions to convert a numeric or string to `int`, `int64`, `float64` -+ -+## Release 2.6.0 (2016-10-03) -+ -+- Added a `uuidv4` template function for generating UUIDs inside of a template. -+ -+## Release 2.5.0 (2016-08-19) -+ -+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions -+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) -+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 -+ -+## Release 2.4.0 (2016-08-16) -+ -+- Adds two functions: `until` and `untilStep` -+ -+## Release 2.3.0 (2016-06-21) -+ -+- cat: Concatenate strings with whitespace separators. -+- replace: Replace parts of a string: `replace "" "" ""-"" ""Me First""` renders ""Me-First"" -+- plural: Format plurals: `len ""foo"" | plural ""one foo"" ""many foos""` renders ""many foos"" -+- indent: Indent blocks of text in a way that is sensitive to ""\n"" characters. -+ -+## Release 2.2.0 (2016-04-21) -+ -+- Added a `genPrivateKey` function (Thanks @bacongobbler) -+ -+## Release 2.1.0 (2016-03-30) -+ -+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default ""bar""}}`. -+- Added accessors for ""hermetic"" functions. 
These return only functions that, when given the same input, produce the same output. -+ -+## Release 2.0.0 (2016-03-29) -+ -+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. -+ -+- `min` complements `max` (formerly `biggest`) -+- `empty` indicates that a value is the empty value for its type -+- `tuple` creates a tuple inside of a template: `{{$t := tuple ""a"", ""b"" ""c""}}` -+- `dict` creates a dictionary inside of a template `{{$d := dict ""key1"" ""val1"" ""key2"" ""val2""}}` -+- Date formatters have been added for HTML dates (as used in `date` input fields) -+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). -+ -+## Release 1.2.0 (2016-02-01) -+ -+- Added quote and squote -+- Added b32enc and b32dec -+- add now takes varargs -+- biggest now takes varargs -+ -+## Release 1.1.0 (2015-12-29) -+ -+- Added #4: Added contains function. strings.Contains, but with the arguments -+ switched to simplify common pipelines. (thanks krancour) -+- Added Travis-CI testing support -+ -+## Release 1.0.0 (2015-12-23) -+ -+- Initial release -diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt -new file mode 100644 -index 0000000000000..f311b1eaaaa84 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt -@@ -0,0 +1,19 @@ -+Copyright (C) 2013-2020 Masterminds -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the ""Software""), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile -new file mode 100644 -index 0000000000000..78d409cde2c5e ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/Makefile -@@ -0,0 +1,9 @@ -+.PHONY: test -+test: -+ @echo ""==> Running tests"" -+ GO111MODULE=on go test -v -+ -+.PHONY: test-cover -+test-cover: -+ @echo ""==> Running Tests with coverage"" -+ GO111MODULE=on go test -cover . 
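For readers skimming this vendored diff, the Usage section of the README that follows boils down to one rule: register Sprig's FuncMap before parsing any templates. Below is a minimal, self-contained sketch, not part of the vendored files; it assumes the v3 module path from go.mod and the `upper`/`repeat` entries listed in functions.go, and mirrors the README's pipeline example.

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/Masterminds/sprig/v3"
)

func main() {
	// TxtFuncMap returns the generic function map typed for text/template.
	// Funcs must be called before Parse so the template can resolve the names.
	tpl := template.Must(
		template.New("demo").
			Funcs(sprig.TxtFuncMap()).
			Parse(`{{ "hello!" | upper | repeat 5 }}`),
	)

	// Prints HELLO!HELLO!HELLO!HELLO!HELLO!, matching the README example.
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```

Note the argument order: `repeat 5` receives the piped string as its last argument, which is why Sprig deliberately swaps parameters relative to the standard library so that values flow naturally through pipelines.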
-diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md -new file mode 100644 -index 0000000000000..c37ba01c2162b ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/README.md -@@ -0,0 +1,101 @@ -+# Sprig: Template functions for Go templates -+ -+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) -+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) -+[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) -+[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) -+ -+The Go language comes with a [built-in template -+language](http://golang.org/pkg/text/template/), but not -+very many template functions. Sprig is a library that provides more than 100 commonly -+used template functions. -+ -+It is inspired by the template functions found in -+[Twig](http://twig.sensiolabs.org/documentation) and in various -+JavaScript libraries, such as [underscore.js](http://underscorejs.org/). -+ -+## IMPORTANT NOTES -+ -+Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In -+its v0.3.9 release there was a behavior change that impacts merging template -+functions in sprig. It is currently recommended to use v0.3.8 of that package. -+Using v0.3.9 will cause sprig tests to fail. The issue in mergo is tracked at -+https://github.com/imdario/mergo/issues/139. -+ -+## Package Versions -+ -+There are two active major versions of the `sprig` package. -+ -+* v3 is currently stable release series on the `master` branch. The Go API should -+ remain compatible with v2, the current stable version. Behavior change behind -+ some functions is the reason for the new major version. -+* v2 is the previous stable release series. It has been more than three years since -+ the initial release of v2. You can read the documentation and see the code -+ on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. -+ Bug fixes to this major version will continue for some time. -+ -+## Usage -+ -+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for -+detailed instructions and code snippets for the >100 template functions available. -+ -+**Go developers**: If you'd like to include Sprig as a library in your program, -+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). -+ -+For standard usage, read on. -+ -+### Load the Sprig library -+ -+To load the Sprig `FuncMap`: -+ -+```go -+ -+import ( -+ ""github.com/Masterminds/sprig"" -+ ""html/template"" -+) -+ -+// This example illustrates that the FuncMap *must* be set before the -+// templates themselves are loaded. -+tpl := template.Must( -+ template.New(""base"").Funcs(sprig.FuncMap()).ParseGlob(""*.html"") -+) -+ -+ -+``` -+ -+### Calling the functions inside of templates -+ -+By convention, all functions are lowercase. This seems to follow the Go -+idiom for template functions (as opposed to template methods, which are -+TitleCase). For example, this: -+ -+``` -+{{ ""hello!"" | upper | repeat 5 }} -+``` -+ -+produces this: -+ -+``` -+HELLO!HELLO!HELLO!HELLO!HELLO! 
-+``` -+ -+## Principles Driving Our Function Selection -+ -+We followed these principles to decide which functions to add and how to implement them: -+ -+- Use template functions to build layout. The following -+ types of operations are within the domain of template functions: -+ - Formatting -+ - Layout -+ - Simple type conversions -+ - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) -+- Template functions should not return errors unless there is no way to print -+ a sensible value. For example, converting a string to an integer should not -+ produce an error if conversion fails. Instead, it should display a default -+ value. -+- Simple math is necessary for grid layouts, pagers, and so on. Complex math -+ (anything other than arithmetic) should be done outside of templates. -+- Template functions only deal with the data passed into them. They never retrieve -+ data from a source. -+- Finally, do not override core Go template functions. -diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go -new file mode 100644 -index 0000000000000..13a5cd55934e5 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go -@@ -0,0 +1,653 @@ -+package sprig -+ -+import ( -+ ""bytes"" -+ ""crypto"" -+ ""crypto/aes"" -+ ""crypto/cipher"" -+ ""crypto/dsa"" -+ ""crypto/ecdsa"" -+ ""crypto/ed25519"" -+ ""crypto/elliptic"" -+ ""crypto/hmac"" -+ ""crypto/rand"" -+ ""crypto/rsa"" -+ ""crypto/sha1"" -+ ""crypto/sha256"" -+ ""crypto/x509"" -+ ""crypto/x509/pkix"" -+ ""encoding/asn1"" -+ ""encoding/base64"" -+ ""encoding/binary"" -+ ""encoding/hex"" -+ ""encoding/pem"" -+ ""errors"" -+ ""fmt"" -+ ""hash/adler32"" -+ ""io"" -+ ""math/big"" -+ ""net"" -+ ""time"" -+ -+ ""strings"" -+ -+ ""github.com/google/uuid"" -+ bcrypt_lib ""golang.org/x/crypto/bcrypt"" -+ ""golang.org/x/crypto/scrypt"" -+) -+ -+func sha256sum(input string) string { -+ hash := sha256.Sum256([]byte(input)) -+ return hex.EncodeToString(hash[:]) -+} -+ -+func sha1sum(input string) string { -+ hash := sha1.Sum([]byte(input)) -+ return hex.EncodeToString(hash[:]) -+} -+ -+func adler32sum(input string) string { -+ hash := adler32.Checksum([]byte(input)) -+ return fmt.Sprintf(""%d"", hash) -+} -+ -+func bcrypt(input string) string { -+ hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) -+ if err != nil { -+ return fmt.Sprintf(""failed to encrypt string with bcrypt: %s"", err) -+ } -+ -+ return string(hash) -+} -+ -+func htpasswd(username string, password string) string { -+ if strings.Contains(username, "":"") { -+ return fmt.Sprintf(""invalid username: %s"", username) -+ } -+ return fmt.Sprintf(""%s:%s"", username, bcrypt(password)) -+} -+ -+func randBytes(count int) (string, error) { -+ buf := make([]byte, count) -+ if _, err := rand.Read(buf); err != nil { -+ return """", err -+ } -+ return base64.StdEncoding.EncodeToString(buf), nil -+} -+ -+// uuidv4 provides a safe and secure UUID v4 implementation -+func uuidv4() string { -+ return uuid.New().String() -+} -+ -+var masterPasswordSeed = ""com.lyndir.masterpassword"" -+ -+var passwordTypeTemplates = map[string][][]byte{ -+ ""maximum"": {[]byte(""anoxxxxxxxxxxxxxxxxx""), []byte(""axxxxxxxxxxxxxxxxxno"")}, -+ ""long"": {[]byte(""CvcvnoCvcvCvcv""), []byte(""CvcvCvcvnoCvcv""), []byte(""CvcvCvcvCvcvno""), []byte(""CvccnoCvcvCvcv""), []byte(""CvccCvcvnoCvcv""), -+ []byte(""CvccCvcvCvcvno""), []byte(""CvcvnoCvccCvcv""), []byte(""CvcvCvccnoCvcv""), 
[]byte(""CvcvCvccCvcvno""), []byte(""CvcvnoCvcvCvcc""), -+ []byte(""CvcvCvcvnoCvcc""), []byte(""CvcvCvcvCvccno""), []byte(""CvccnoCvccCvcv""), []byte(""CvccCvccnoCvcv""), []byte(""CvccCvccCvcvno""), -+ []byte(""CvcvnoCvccCvcc""), []byte(""CvcvCvccnoCvcc""), []byte(""CvcvCvccCvccno""), []byte(""CvccnoCvcvCvcc""), []byte(""CvccCvcvnoCvcc""), -+ []byte(""CvccCvcvCvccno"")}, -+ ""medium"": {[]byte(""CvcnoCvc""), []byte(""CvcCvcno"")}, -+ ""short"": {[]byte(""Cvcn"")}, -+ ""basic"": {[]byte(""aaanaaan""), []byte(""aannaaan""), []byte(""aaannaaa"")}, -+ ""pin"": {[]byte(""nnnn"")}, -+} -+ -+var templateCharacters = map[byte]string{ -+ 'V': ""AEIOU"", -+ 'C': ""BCDFGHJKLMNPQRSTVWXYZ"", -+ 'v': ""aeiou"", -+ 'c': ""bcdfghjklmnpqrstvwxyz"", -+ 'A': ""AEIOUBCDFGHJKLMNPQRSTVWXYZ"", -+ 'a': ""AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz"", -+ 'n': ""0123456789"", -+ 'o': ""@&%?,=[]_:-+*$#!'^~;()/."", -+ 'x': ""AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()"", -+} -+ -+func derivePassword(counter uint32, passwordType, password, user, site string) string { -+ var templates = passwordTypeTemplates[passwordType] -+ if templates == nil { -+ return fmt.Sprintf(""cannot find password template %s"", passwordType) -+ } -+ -+ var buffer bytes.Buffer -+ buffer.WriteString(masterPasswordSeed) -+ binary.Write(&buffer, binary.BigEndian, uint32(len(user))) -+ buffer.WriteString(user) -+ -+ salt := buffer.Bytes() -+ key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) -+ if err != nil { -+ return fmt.Sprintf(""failed to derive password: %s"", err) -+ } -+ -+ buffer.Truncate(len(masterPasswordSeed)) -+ binary.Write(&buffer, binary.BigEndian, uint32(len(site))) -+ buffer.WriteString(site) -+ binary.Write(&buffer, binary.BigEndian, counter) -+ -+ var hmacv = hmac.New(sha256.New, key) -+ hmacv.Write(buffer.Bytes()) -+ var seed = hmacv.Sum(nil) -+ var temp = templates[int(seed[0])%len(templates)] -+ -+ buffer.Truncate(0) -+ for i, element := range temp { -+ passChars := templateCharacters[element] -+ passChar := passChars[int(seed[i+1])%len(passChars)] -+ buffer.WriteByte(passChar) -+ } -+ -+ return buffer.String() -+} -+ -+func generatePrivateKey(typ string) string { -+ var priv interface{} -+ var err error -+ switch typ { -+ case """", ""rsa"": -+ // good enough for government work -+ priv, err = rsa.GenerateKey(rand.Reader, 4096) -+ case ""dsa"": -+ key := new(dsa.PrivateKey) -+ // again, good enough for government work -+ if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { -+ return fmt.Sprintf(""failed to generate dsa params: %s"", err) -+ } -+ err = dsa.GenerateKey(key, rand.Reader) -+ priv = key -+ case ""ecdsa"": -+ // again, good enough for government work -+ priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) -+ case ""ed25519"": -+ _, priv, err = ed25519.GenerateKey(rand.Reader) -+ default: -+ return ""Unknown type "" + typ -+ } -+ if err != nil { -+ return fmt.Sprintf(""failed to generate private key: %s"", err) -+ } -+ -+ return string(pem.EncodeToMemory(pemBlockForKey(priv))) -+} -+ -+// DSAKeyFormat stores the format for DSA keys. 
-+// Used by pemBlockForKey -+type DSAKeyFormat struct { -+ Version int -+ P, Q, G, Y, X *big.Int -+} -+ -+func pemBlockForKey(priv interface{}) *pem.Block { -+ switch k := priv.(type) { -+ case *rsa.PrivateKey: -+ return &pem.Block{Type: ""RSA PRIVATE KEY"", Bytes: x509.MarshalPKCS1PrivateKey(k)} -+ case *dsa.PrivateKey: -+ val := DSAKeyFormat{ -+ P: k.P, Q: k.Q, G: k.G, -+ Y: k.Y, X: k.X, -+ } -+ bytes, _ := asn1.Marshal(val) -+ return &pem.Block{Type: ""DSA PRIVATE KEY"", Bytes: bytes} -+ case *ecdsa.PrivateKey: -+ b, _ := x509.MarshalECPrivateKey(k) -+ return &pem.Block{Type: ""EC PRIVATE KEY"", Bytes: b} -+ default: -+ // attempt PKCS#8 format for all other keys -+ b, err := x509.MarshalPKCS8PrivateKey(k) -+ if err != nil { -+ return nil -+ } -+ return &pem.Block{Type: ""PRIVATE KEY"", Bytes: b} -+ } -+} -+ -+func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { -+ block, _ := pem.Decode([]byte(pemBlock)) -+ if block == nil { -+ return nil, errors.New(""no PEM data in input"") -+ } -+ -+ if block.Type == ""PRIVATE KEY"" { -+ priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) -+ if err != nil { -+ return nil, fmt.Errorf(""decoding PEM as PKCS#8: %s"", err) -+ } -+ return priv, nil -+ } else if !strings.HasSuffix(block.Type, "" PRIVATE KEY"") { -+ return nil, fmt.Errorf(""no private key data in PEM block of type %s"", block.Type) -+ } -+ -+ switch block.Type[:len(block.Type)-12] { // strip "" PRIVATE KEY"" -+ case ""RSA"": -+ priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) -+ if err != nil { -+ return nil, fmt.Errorf(""parsing RSA private key from PEM: %s"", err) -+ } -+ return priv, nil -+ case ""EC"": -+ priv, err := x509.ParseECPrivateKey(block.Bytes) -+ if err != nil { -+ return nil, fmt.Errorf(""parsing EC private key from PEM: %s"", err) -+ } -+ return priv, nil -+ case ""DSA"": -+ var k DSAKeyFormat -+ _, err := asn1.Unmarshal(block.Bytes, &k) -+ if err != nil { -+ return nil, fmt.Errorf(""parsing DSA private key from PEM: %s"", err) -+ } -+ priv := &dsa.PrivateKey{ -+ PublicKey: dsa.PublicKey{ -+ Parameters: dsa.Parameters{ -+ P: k.P, Q: k.Q, G: k.G, -+ }, -+ Y: k.Y, -+ }, -+ X: k.X, -+ } -+ return priv, nil -+ default: -+ return nil, fmt.Errorf(""invalid private key type %s"", block.Type) -+ } -+} -+ -+func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { -+ switch k := priv.(type) { -+ case interface{ Public() crypto.PublicKey }: -+ return k.Public(), nil -+ case *dsa.PrivateKey: -+ return &k.PublicKey, nil -+ default: -+ return nil, fmt.Errorf(""unable to get public key for type %T"", priv) -+ } -+} -+ -+type certificate struct { -+ Cert string -+ Key string -+} -+ -+func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { -+ crt := certificate{} -+ -+ cert, err := base64.StdEncoding.DecodeString(b64cert) -+ if err != nil { -+ return crt, errors.New(""unable to decode base64 certificate"") -+ } -+ -+ key, err := base64.StdEncoding.DecodeString(b64key) -+ if err != nil { -+ return crt, errors.New(""unable to decode base64 private key"") -+ } -+ -+ decodedCert, _ := pem.Decode(cert) -+ if decodedCert == nil { -+ return crt, errors.New(""unable to decode certificate"") -+ } -+ _, err = x509.ParseCertificate(decodedCert.Bytes) -+ if err != nil { -+ return crt, fmt.Errorf( -+ ""error parsing certificate: decodedCert.Bytes: %s"", -+ err, -+ ) -+ } -+ -+ _, err = parsePrivateKeyPEM(string(key)) -+ if err != nil { -+ return crt, fmt.Errorf( -+ ""error parsing private key: %s"", -+ err, -+ ) -+ } -+ -+ crt.Cert = 
string(cert) -+ crt.Key = string(key) -+ -+ return crt, nil -+} -+ -+func generateCertificateAuthority( -+ cn string, -+ daysValid int, -+) (certificate, error) { -+ priv, err := rsa.GenerateKey(rand.Reader, 2048) -+ if err != nil { -+ return certificate{}, fmt.Errorf(""error generating rsa key: %s"", err) -+ } -+ -+ return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -+} -+ -+func generateCertificateAuthorityWithPEMKey( -+ cn string, -+ daysValid int, -+ privPEM string, -+) (certificate, error) { -+ priv, err := parsePrivateKeyPEM(privPEM) -+ if err != nil { -+ return certificate{}, fmt.Errorf(""parsing private key: %s"", err) -+ } -+ return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -+} -+ -+func generateCertificateAuthorityWithKeyInternal( -+ cn string, -+ daysValid int, -+ priv crypto.PrivateKey, -+) (certificate, error) { -+ ca := certificate{} -+ -+ template, err := getBaseCertTemplate(cn, nil, nil, daysValid) -+ if err != nil { -+ return ca, err -+ } -+ // Override KeyUsage and IsCA -+ template.KeyUsage = x509.KeyUsageKeyEncipherment | -+ x509.KeyUsageDigitalSignature | -+ x509.KeyUsageCertSign -+ template.IsCA = true -+ -+ ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) -+ -+ return ca, err -+} -+ -+func generateSelfSignedCertificate( -+ cn string, -+ ips []interface{}, -+ alternateDNS []interface{}, -+ daysValid int, -+) (certificate, error) { -+ priv, err := rsa.GenerateKey(rand.Reader, 2048) -+ if err != nil { -+ return certificate{}, fmt.Errorf(""error generating rsa key: %s"", err) -+ } -+ return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -+} -+ -+func generateSelfSignedCertificateWithPEMKey( -+ cn string, -+ ips []interface{}, -+ alternateDNS []interface{}, -+ daysValid int, -+ privPEM string, -+) (certificate, error) { -+ priv, err := parsePrivateKeyPEM(privPEM) -+ if err != nil { -+ return certificate{}, fmt.Errorf(""parsing private key: %s"", err) -+ } -+ return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -+} -+ -+func generateSelfSignedCertificateWithKeyInternal( -+ cn string, -+ ips []interface{}, -+ alternateDNS []interface{}, -+ daysValid int, -+ priv crypto.PrivateKey, -+) (certificate, error) { -+ cert := certificate{} -+ -+ template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) -+ if err != nil { -+ return cert, err -+ } -+ -+ cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) -+ -+ return cert, err -+} -+ -+func generateSignedCertificate( -+ cn string, -+ ips []interface{}, -+ alternateDNS []interface{}, -+ daysValid int, -+ ca certificate, -+) (certificate, error) { -+ priv, err := rsa.GenerateKey(rand.Reader, 2048) -+ if err != nil { -+ return certificate{}, fmt.Errorf(""error generating rsa key: %s"", err) -+ } -+ return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -+} -+ -+func generateSignedCertificateWithPEMKey( -+ cn string, -+ ips []interface{}, -+ alternateDNS []interface{}, -+ daysValid int, -+ ca certificate, -+ privPEM string, -+) (certificate, error) { -+ priv, err := parsePrivateKeyPEM(privPEM) -+ if err != nil { -+ return certificate{}, fmt.Errorf(""parsing private key: %s"", err) -+ } -+ return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -+} -+ -+func generateSignedCertificateWithKeyInternal( -+ cn string, -+ ips []interface{}, -+ alternateDNS []interface{}, -+ daysValid int, -+ ca 
certificate, -+ priv crypto.PrivateKey, -+) (certificate, error) { -+ cert := certificate{} -+ -+ decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) -+ if decodedSignerCert == nil { -+ return cert, errors.New(""unable to decode certificate"") -+ } -+ signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) -+ if err != nil { -+ return cert, fmt.Errorf( -+ ""error parsing certificate: decodedSignerCert.Bytes: %s"", -+ err, -+ ) -+ } -+ signerKey, err := parsePrivateKeyPEM(ca.Key) -+ if err != nil { -+ return cert, fmt.Errorf( -+ ""error parsing private key: %s"", -+ err, -+ ) -+ } -+ -+ template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) -+ if err != nil { -+ return cert, err -+ } -+ -+ cert.Cert, cert.Key, err = getCertAndKey( -+ template, -+ priv, -+ signerCert, -+ signerKey, -+ ) -+ -+ return cert, err -+} -+ -+func getCertAndKey( -+ template *x509.Certificate, -+ signeeKey crypto.PrivateKey, -+ parent *x509.Certificate, -+ signingKey crypto.PrivateKey, -+) (string, string, error) { -+ signeePubKey, err := getPublicKey(signeeKey) -+ if err != nil { -+ return """", """", fmt.Errorf(""error retrieving public key from signee key: %s"", err) -+ } -+ derBytes, err := x509.CreateCertificate( -+ rand.Reader, -+ template, -+ parent, -+ signeePubKey, -+ signingKey, -+ ) -+ if err != nil { -+ return """", """", fmt.Errorf(""error creating certificate: %s"", err) -+ } -+ -+ certBuffer := bytes.Buffer{} -+ if err := pem.Encode( -+ &certBuffer, -+ &pem.Block{Type: ""CERTIFICATE"", Bytes: derBytes}, -+ ); err != nil { -+ return """", """", fmt.Errorf(""error pem-encoding certificate: %s"", err) -+ } -+ -+ keyBuffer := bytes.Buffer{} -+ if err := pem.Encode( -+ &keyBuffer, -+ pemBlockForKey(signeeKey), -+ ); err != nil { -+ return """", """", fmt.Errorf(""error pem-encoding key: %s"", err) -+ } -+ -+ return certBuffer.String(), keyBuffer.String(), nil -+} -+ -+func getBaseCertTemplate( -+ cn string, -+ ips []interface{}, -+ alternateDNS []interface{}, -+ daysValid int, -+) (*x509.Certificate, error) { -+ ipAddresses, err := getNetIPs(ips) -+ if err != nil { -+ return nil, err -+ } -+ dnsNames, err := getAlternateDNSStrs(alternateDNS) -+ if err != nil { -+ return nil, err -+ } -+ serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) -+ serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) -+ if err != nil { -+ return nil, err -+ } -+ return &x509.Certificate{ -+ SerialNumber: serialNumber, -+ Subject: pkix.Name{ -+ CommonName: cn, -+ }, -+ IPAddresses: ipAddresses, -+ DNSNames: dnsNames, -+ NotBefore: time.Now(), -+ NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), -+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, -+ ExtKeyUsage: []x509.ExtKeyUsage{ -+ x509.ExtKeyUsageServerAuth, -+ x509.ExtKeyUsageClientAuth, -+ }, -+ BasicConstraintsValid: true, -+ }, nil -+} -+ -+func getNetIPs(ips []interface{}) ([]net.IP, error) { -+ if ips == nil { -+ return []net.IP{}, nil -+ } -+ var ipStr string -+ var ok bool -+ var netIP net.IP -+ netIPs := make([]net.IP, len(ips)) -+ for i, ip := range ips { -+ ipStr, ok = ip.(string) -+ if !ok { -+ return nil, fmt.Errorf(""error parsing ip: %v is not a string"", ip) -+ } -+ netIP = net.ParseIP(ipStr) -+ if netIP == nil { -+ return nil, fmt.Errorf(""error parsing ip: %s"", ipStr) -+ } -+ netIPs[i] = netIP -+ } -+ return netIPs, nil -+} -+ -+func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { -+ if alternateDNS == nil { -+ return []string{}, nil -+ } -+ var 
dnsStr string -+ var ok bool -+ alternateDNSStrs := make([]string, len(alternateDNS)) -+ for i, dns := range alternateDNS { -+ dnsStr, ok = dns.(string) -+ if !ok { -+ return nil, fmt.Errorf( -+ ""error processing alternate dns name: %v is not a string"", -+ dns, -+ ) -+ } -+ alternateDNSStrs[i] = dnsStr -+ } -+ return alternateDNSStrs, nil -+} -+ -+func encryptAES(password string, plaintext string) (string, error) { -+ if plaintext == """" { -+ return """", nil -+ } -+ -+ key := make([]byte, 32) -+ copy(key, []byte(password)) -+ block, err := aes.NewCipher(key) -+ if err != nil { -+ return """", err -+ } -+ -+ content := []byte(plaintext) -+ blockSize := block.BlockSize() -+ padding := blockSize - len(content)%blockSize -+ padtext := bytes.Repeat([]byte{byte(padding)}, padding) -+ content = append(content, padtext...) -+ -+ ciphertext := make([]byte, aes.BlockSize+len(content)) -+ -+ iv := ciphertext[:aes.BlockSize] -+ if _, err := io.ReadFull(rand.Reader, iv); err != nil { -+ return """", err -+ } -+ -+ mode := cipher.NewCBCEncrypter(block, iv) -+ mode.CryptBlocks(ciphertext[aes.BlockSize:], content) -+ -+ return base64.StdEncoding.EncodeToString(ciphertext), nil -+} -+ -+func decryptAES(password string, crypt64 string) (string, error) { -+ if crypt64 == """" { -+ return """", nil -+ } -+ -+ key := make([]byte, 32) -+ copy(key, []byte(password)) -+ -+ crypt, err := base64.StdEncoding.DecodeString(crypt64) -+ if err != nil { -+ return """", err -+ } -+ -+ block, err := aes.NewCipher(key) -+ if err != nil { -+ return """", err -+ } -+ -+ iv := crypt[:aes.BlockSize] -+ crypt = crypt[aes.BlockSize:] -+ decrypted := make([]byte, len(crypt)) -+ mode := cipher.NewCBCDecrypter(block, iv) -+ mode.CryptBlocks(decrypted, crypt) -+ -+ return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go -new file mode 100644 -index 0000000000000..ed022ddacac00 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/date.go -@@ -0,0 +1,152 @@ -+package sprig -+ -+import ( -+ ""strconv"" -+ ""time"" -+) -+ -+// Given a format and a date, format the date string. -+// -+// Date can be a `time.Time` or an `int, int32, int64`. -+// In the later case, it is treated as seconds since UNIX -+// epoch. 
-+func date(fmt string, date interface{}) string { -+ return dateInZone(fmt, date, ""Local"") -+} -+ -+func htmlDate(date interface{}) string { -+ return dateInZone(""2006-01-02"", date, ""Local"") -+} -+ -+func htmlDateInZone(date interface{}, zone string) string { -+ return dateInZone(""2006-01-02"", date, zone) -+} -+ -+func dateInZone(fmt string, date interface{}, zone string) string { -+ var t time.Time -+ switch date := date.(type) { -+ default: -+ t = time.Now() -+ case time.Time: -+ t = date -+ case *time.Time: -+ t = *date -+ case int64: -+ t = time.Unix(date, 0) -+ case int: -+ t = time.Unix(int64(date), 0) -+ case int32: -+ t = time.Unix(int64(date), 0) -+ } -+ -+ loc, err := time.LoadLocation(zone) -+ if err != nil { -+ loc, _ = time.LoadLocation(""UTC"") -+ } -+ -+ return t.In(loc).Format(fmt) -+} -+ -+func dateModify(fmt string, date time.Time) time.Time { -+ d, err := time.ParseDuration(fmt) -+ if err != nil { -+ return date -+ } -+ return date.Add(d) -+} -+ -+func mustDateModify(fmt string, date time.Time) (time.Time, error) { -+ d, err := time.ParseDuration(fmt) -+ if err != nil { -+ return time.Time{}, err -+ } -+ return date.Add(d), nil -+} -+ -+func dateAgo(date interface{}) string { -+ var t time.Time -+ -+ switch date := date.(type) { -+ default: -+ t = time.Now() -+ case time.Time: -+ t = date -+ case int64: -+ t = time.Unix(date, 0) -+ case int: -+ t = time.Unix(int64(date), 0) -+ } -+ // Drop resolution to seconds -+ duration := time.Since(t).Round(time.Second) -+ return duration.String() -+} -+ -+func duration(sec interface{}) string { -+ var n int64 -+ switch value := sec.(type) { -+ default: -+ n = 0 -+ case string: -+ n, _ = strconv.ParseInt(value, 10, 64) -+ case int64: -+ n = value -+ } -+ return (time.Duration(n) * time.Second).String() -+} -+ -+func durationRound(duration interface{}) string { -+ var d time.Duration -+ switch duration := duration.(type) { -+ default: -+ d = 0 -+ case string: -+ d, _ = time.ParseDuration(duration) -+ case int64: -+ d = time.Duration(duration) -+ case time.Time: -+ d = time.Since(duration) -+ } -+ -+ u := uint64(d) -+ neg := d < 0 -+ if neg { -+ u = -u -+ } -+ -+ var ( -+ year = uint64(time.Hour) * 24 * 365 -+ month = uint64(time.Hour) * 24 * 30 -+ day = uint64(time.Hour) * 24 -+ hour = uint64(time.Hour) -+ minute = uint64(time.Minute) -+ second = uint64(time.Second) -+ ) -+ switch { -+ case u > year: -+ return strconv.FormatUint(u/year, 10) + ""y"" -+ case u > month: -+ return strconv.FormatUint(u/month, 10) + ""mo"" -+ case u > day: -+ return strconv.FormatUint(u/day, 10) + ""d"" -+ case u > hour: -+ return strconv.FormatUint(u/hour, 10) + ""h"" -+ case u > minute: -+ return strconv.FormatUint(u/minute, 10) + ""m"" -+ case u > second: -+ return strconv.FormatUint(u/second, 10) + ""s"" -+ } -+ return ""0s"" -+} -+ -+func toDate(fmt, str string) time.Time { -+ t, _ := time.ParseInLocation(fmt, str, time.Local) -+ return t -+} -+ -+func mustToDate(fmt, str string) (time.Time, error) { -+ return time.ParseInLocation(fmt, str, time.Local) -+} -+ -+func unixEpoch(date time.Time) string { -+ return strconv.FormatInt(date.Unix(), 10) -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go -new file mode 100644 -index 0000000000000..b9f979666dd3e ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go -@@ -0,0 +1,163 @@ -+package sprig -+ -+import ( -+ ""bytes"" -+ ""encoding/json"" -+ ""math/rand"" -+ ""reflect"" -+ ""strings"" -+ ""time"" -+) -+ 
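// Illustrative note (not part of the vendored file): the default/empty/coalesce
// helpers defined below treat a type's zero value as "unset", so in a template
//
//	{{ .Foo | default "bar" }}   -> "bar" when .Foo is "", 0, nil or false
//	{{ coalesce "" 0 "baz" }}    -> "baz" (first non-empty argument wins)
//	{{ empty .Items }}           -> true for a nil or zero-length slice
//
// per the dfault/empty doc comments that follow and the 2.1.0 changelog entry above.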
-+func init() { -+ rand.Seed(time.Now().UnixNano()) -+} -+ -+// dfault checks whether `given` is set, and returns default if not set. -+// -+// This returns `d` if `given` appears not to be set, and `given` otherwise. -+// -+// For numeric types 0 is unset. -+// For strings, maps, arrays, and slices, len() = 0 is considered unset. -+// For bool, false is unset. -+// Structs are never considered unset. -+// -+// For everything else, including pointers, a nil value is unset. -+func dfault(d interface{}, given ...interface{}) interface{} { -+ -+ if empty(given) || empty(given[0]) { -+ return d -+ } -+ return given[0] -+} -+ -+// empty returns true if the given value has the zero value for its type. -+func empty(given interface{}) bool { -+ g := reflect.ValueOf(given) -+ if !g.IsValid() { -+ return true -+ } -+ -+ // Basically adapted from text/template.isTrue -+ switch g.Kind() { -+ default: -+ return g.IsNil() -+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String: -+ return g.Len() == 0 -+ case reflect.Bool: -+ return !g.Bool() -+ case reflect.Complex64, reflect.Complex128: -+ return g.Complex() == 0 -+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: -+ return g.Int() == 0 -+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: -+ return g.Uint() == 0 -+ case reflect.Float32, reflect.Float64: -+ return g.Float() == 0 -+ case reflect.Struct: -+ return false -+ } -+} -+ -+// coalesce returns the first non-empty value. -+func coalesce(v ...interface{}) interface{} { -+ for _, val := range v { -+ if !empty(val) { -+ return val -+ } -+ } -+ return nil -+} -+ -+// all returns true if empty(x) is false for all values x in the list. -+// If the list is empty, return true. -+func all(v ...interface{}) bool { -+ for _, val := range v { -+ if empty(val) { -+ return false -+ } -+ } -+ return true -+} -+ -+// any returns true if empty(x) is false for any x in the list. -+// If the list is empty, return false. -+func any(v ...interface{}) bool { -+ for _, val := range v { -+ if !empty(val) { -+ return true -+ } -+ } -+ return false -+} -+ -+// fromJson decodes JSON into a structured value, ignoring errors. -+func fromJson(v string) interface{} { -+ output, _ := mustFromJson(v) -+ return output -+} -+ -+// mustFromJson decodes JSON into a structured value, returning errors. -+func mustFromJson(v string) (interface{}, error) { -+ var output interface{} -+ err := json.Unmarshal([]byte(v), &output) -+ return output, err -+} -+ -+// toJson encodes an item into a JSON string -+func toJson(v interface{}) string { -+ output, _ := json.Marshal(v) -+ return string(output) -+} -+ -+func mustToJson(v interface{}) (string, error) { -+ output, err := json.Marshal(v) -+ if err != nil { -+ return """", err -+ } -+ return string(output), nil -+} -+ -+// toPrettyJson encodes an item into a pretty (indented) JSON string -+func toPrettyJson(v interface{}) string { -+ output, _ := json.MarshalIndent(v, """", "" "") -+ return string(output) -+} -+ -+func mustToPrettyJson(v interface{}) (string, error) { -+ output, err := json.MarshalIndent(v, """", "" "") -+ if err != nil { -+ return """", err -+ } -+ return string(output), nil -+} -+ -+// toRawJson encodes an item into a JSON string with no escaping of HTML characters. 
-+func toRawJson(v interface{}) string { -+ output, err := mustToRawJson(v) -+ if err != nil { -+ panic(err) -+ } -+ return string(output) -+} -+ -+// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. -+func mustToRawJson(v interface{}) (string, error) { -+ buf := new(bytes.Buffer) -+ enc := json.NewEncoder(buf) -+ enc.SetEscapeHTML(false) -+ err := enc.Encode(&v) -+ if err != nil { -+ return """", err -+ } -+ return strings.TrimSuffix(buf.String(), ""\n""), nil -+} -+ -+// ternary returns the first value if the last value is true, otherwise returns the second value. -+func ternary(vt interface{}, vf interface{}, v bool) interface{} { -+ if v { -+ return vt -+ } -+ -+ return vf -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go -new file mode 100644 -index 0000000000000..ade88969840a5 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/dict.go -@@ -0,0 +1,174 @@ -+package sprig -+ -+import ( -+ ""github.com/imdario/mergo"" -+ ""github.com/mitchellh/copystructure"" -+) -+ -+func get(d map[string]interface{}, key string) interface{} { -+ if val, ok := d[key]; ok { -+ return val -+ } -+ return """" -+} -+ -+func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { -+ d[key] = value -+ return d -+} -+ -+func unset(d map[string]interface{}, key string) map[string]interface{} { -+ delete(d, key) -+ return d -+} -+ -+func hasKey(d map[string]interface{}, key string) bool { -+ _, ok := d[key] -+ return ok -+} -+ -+func pluck(key string, d ...map[string]interface{}) []interface{} { -+ res := []interface{}{} -+ for _, dict := range d { -+ if val, ok := dict[key]; ok { -+ res = append(res, val) -+ } -+ } -+ return res -+} -+ -+func keys(dicts ...map[string]interface{}) []string { -+ k := []string{} -+ for _, dict := range dicts { -+ for key := range dict { -+ k = append(k, key) -+ } -+ } -+ return k -+} -+ -+func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { -+ res := map[string]interface{}{} -+ for _, k := range keys { -+ if v, ok := dict[k]; ok { -+ res[k] = v -+ } -+ } -+ return res -+} -+ -+func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { -+ res := map[string]interface{}{} -+ -+ omit := make(map[string]bool, len(keys)) -+ for _, k := range keys { -+ omit[k] = true -+ } -+ -+ for k, v := range dict { -+ if _, ok := omit[k]; !ok { -+ res[k] = v -+ } -+ } -+ return res -+} -+ -+func dict(v ...interface{}) map[string]interface{} { -+ dict := map[string]interface{}{} -+ lenv := len(v) -+ for i := 0; i < lenv; i += 2 { -+ key := strval(v[i]) -+ if i+1 >= lenv { -+ dict[key] = """" -+ continue -+ } -+ dict[key] = v[i+1] -+ } -+ return dict -+} -+ -+func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { -+ for _, src := range srcs { -+ if err := mergo.Merge(&dst, src); err != nil { -+ // Swallow errors inside of a template. -+ return """" -+ } -+ } -+ return dst -+} -+ -+func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { -+ for _, src := range srcs { -+ if err := mergo.Merge(&dst, src); err != nil { -+ return nil, err -+ } -+ } -+ return dst, nil -+} -+ -+func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { -+ for _, src := range srcs { -+ if err := mergo.MergeWithOverwrite(&dst, src); err != nil { -+ // Swallow errors inside of a template. 
-+ return """" -+ } -+ } -+ return dst -+} -+ -+func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { -+ for _, src := range srcs { -+ if err := mergo.MergeWithOverwrite(&dst, src); err != nil { -+ return nil, err -+ } -+ } -+ return dst, nil -+} -+ -+func values(dict map[string]interface{}) []interface{} { -+ values := []interface{}{} -+ for _, value := range dict { -+ values = append(values, value) -+ } -+ -+ return values -+} -+ -+func deepCopy(i interface{}) interface{} { -+ c, err := mustDeepCopy(i) -+ if err != nil { -+ panic(""deepCopy error: "" + err.Error()) -+ } -+ -+ return c -+} -+ -+func mustDeepCopy(i interface{}) (interface{}, error) { -+ return copystructure.Copy(i) -+} -+ -+func dig(ps ...interface{}) (interface{}, error) { -+ if len(ps) < 3 { -+ panic(""dig needs at least three arguments"") -+ } -+ dict := ps[len(ps)-1].(map[string]interface{}) -+ def := ps[len(ps)-2] -+ ks := make([]string, len(ps)-2) -+ for i := 0; i < len(ks); i++ { -+ ks[i] = ps[i].(string) -+ } -+ -+ return digFromDict(dict, def, ks) -+} -+ -+func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { -+ k, ns := ks[0], ks[1:len(ks)] -+ step, has := dict[k] -+ if !has { -+ return d, nil -+ } -+ if len(ns) == 0 { -+ return step, nil -+ } -+ return digFromDict(step.(map[string]interface{}), d, ns) -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go -new file mode 100644 -index 0000000000000..aabb9d4489f96 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/doc.go -@@ -0,0 +1,19 @@ -+/* -+Package sprig provides template functions for Go. -+ -+This package contains a number of utility functions for working with data -+inside of Go `html/template` and `text/template` files. -+ -+To add these functions, use the `template.Funcs()` method: -+ -+ t := templates.New(""foo"").Funcs(sprig.FuncMap()) -+ -+Note that you should add the function map before you parse any template files. -+ -+ In several cases, Sprig reverses the order of arguments from the way they -+ appear in the standard library. This is to make it easier to pipe -+ arguments into functions. -+ -+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. -+*/ -+package sprig -diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go -new file mode 100644 -index 0000000000000..57fcec1d9ea84 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/functions.go -@@ -0,0 +1,382 @@ -+package sprig -+ -+import ( -+ ""errors"" -+ ""html/template"" -+ ""math/rand"" -+ ""os"" -+ ""path"" -+ ""path/filepath"" -+ ""reflect"" -+ ""strconv"" -+ ""strings"" -+ ttemplate ""text/template"" -+ ""time"" -+ -+ util ""github.com/Masterminds/goutils"" -+ ""github.com/huandu/xstrings"" -+ ""github.com/shopspring/decimal"" -+) -+ -+// FuncMap produces the function map. -+// -+// Use this to pass the functions into the template engine: -+// -+// tpl := template.New(""foo"").Funcs(sprig.FuncMap())) -+// -+func FuncMap() template.FuncMap { -+ return HtmlFuncMap() -+} -+ -+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. -+func HermeticTxtFuncMap() ttemplate.FuncMap { -+ r := TxtFuncMap() -+ for _, name := range nonhermeticFunctions { -+ delete(r, name) -+ } -+ return r -+} -+ -+// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
-+func HermeticHtmlFuncMap() template.FuncMap { -+ r := HtmlFuncMap() -+ for _, name := range nonhermeticFunctions { -+ delete(r, name) -+ } -+ return r -+} -+ -+// TxtFuncMap returns a 'text/template'.FuncMap -+func TxtFuncMap() ttemplate.FuncMap { -+ return ttemplate.FuncMap(GenericFuncMap()) -+} -+ -+// HtmlFuncMap returns an 'html/template'.Funcmap -+func HtmlFuncMap() template.FuncMap { -+ return template.FuncMap(GenericFuncMap()) -+} -+ -+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. -+func GenericFuncMap() map[string]interface{} { -+ gfm := make(map[string]interface{}, len(genericMap)) -+ for k, v := range genericMap { -+ gfm[k] = v -+ } -+ return gfm -+} -+ -+// These functions are not guaranteed to evaluate to the same result for given input, because they -+// refer to the environment or global state. -+var nonhermeticFunctions = []string{ -+ // Date functions -+ ""date"", -+ ""date_in_zone"", -+ ""date_modify"", -+ ""now"", -+ ""htmlDate"", -+ ""htmlDateInZone"", -+ ""dateInZone"", -+ ""dateModify"", -+ -+ // Strings -+ ""randAlphaNum"", -+ ""randAlpha"", -+ ""randAscii"", -+ ""randNumeric"", -+ ""randBytes"", -+ ""uuidv4"", -+ -+ // OS -+ ""env"", -+ ""expandenv"", -+ -+ // Network -+ ""getHostByName"", -+} -+ -+var genericMap = map[string]interface{}{ -+ ""hello"": func() string { return ""Hello!"" }, -+ -+ // Date functions -+ ""ago"": dateAgo, -+ ""date"": date, -+ ""date_in_zone"": dateInZone, -+ ""date_modify"": dateModify, -+ ""dateInZone"": dateInZone, -+ ""dateModify"": dateModify, -+ ""duration"": duration, -+ ""durationRound"": durationRound, -+ ""htmlDate"": htmlDate, -+ ""htmlDateInZone"": htmlDateInZone, -+ ""must_date_modify"": mustDateModify, -+ ""mustDateModify"": mustDateModify, -+ ""mustToDate"": mustToDate, -+ ""now"": time.Now, -+ ""toDate"": toDate, -+ ""unixEpoch"": unixEpoch, -+ -+ // Strings -+ ""abbrev"": abbrev, -+ ""abbrevboth"": abbrevboth, -+ ""trunc"": trunc, -+ ""trim"": strings.TrimSpace, -+ ""upper"": strings.ToUpper, -+ ""lower"": strings.ToLower, -+ ""title"": strings.Title, -+ ""untitle"": untitle, -+ ""substr"": substring, -+ // Switch order so that ""foo"" | repeat 5 -+ ""repeat"": func(count int, str string) string { return strings.Repeat(str, count) }, -+ // Deprecated: Use trimAll. 
-+ ""trimall"": func(a, b string) string { return strings.Trim(b, a) }, -+ // Switch order so that ""$foo"" | trimall ""$"" -+ ""trimAll"": func(a, b string) string { return strings.Trim(b, a) }, -+ ""trimSuffix"": func(a, b string) string { return strings.TrimSuffix(b, a) }, -+ ""trimPrefix"": func(a, b string) string { return strings.TrimPrefix(b, a) }, -+ ""nospace"": util.DeleteWhiteSpace, -+ ""initials"": initials, -+ ""randAlphaNum"": randAlphaNumeric, -+ ""randAlpha"": randAlpha, -+ ""randAscii"": randAscii, -+ ""randNumeric"": randNumeric, -+ ""swapcase"": util.SwapCase, -+ ""shuffle"": xstrings.Shuffle, -+ ""snakecase"": xstrings.ToSnakeCase, -+ ""camelcase"": xstrings.ToCamelCase, -+ ""kebabcase"": xstrings.ToKebabCase, -+ ""wrap"": func(l int, s string) string { return util.Wrap(s, l) }, -+ ""wrapWith"": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, -+ // Switch order so that ""foobar"" | contains ""foo"" -+ ""contains"": func(substr string, str string) bool { return strings.Contains(str, substr) }, -+ ""hasPrefix"": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, -+ ""hasSuffix"": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, -+ ""quote"": quote, -+ ""squote"": squote, -+ ""cat"": cat, -+ ""indent"": indent, -+ ""nindent"": nindent, -+ ""replace"": replace, -+ ""plural"": plural, -+ ""sha1sum"": sha1sum, -+ ""sha256sum"": sha256sum, -+ ""adler32sum"": adler32sum, -+ ""toString"": strval, -+ -+ // Wrap Atoi to stop errors. -+ ""atoi"": func(a string) int { i, _ := strconv.Atoi(a); return i }, -+ ""int64"": toInt64, -+ ""int"": toInt, -+ ""float64"": toFloat64, -+ ""seq"": seq, -+ ""toDecimal"": toDecimal, -+ -+ //""gt"": func(a, b int) bool {return a > b}, -+ //""gte"": func(a, b int) bool {return a >= b}, -+ //""lt"": func(a, b int) bool {return a < b}, -+ //""lte"": func(a, b int) bool {return a <= b}, -+ -+ // split ""/"" foo/bar returns map[int]string{0: foo, 1: bar} -+ ""split"": split, -+ ""splitList"": func(sep, orig string) []string { return strings.Split(orig, sep) }, -+ // splitn ""/"" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} -+ ""splitn"": splitn, -+ ""toStrings"": strslice, -+ -+ ""until"": until, -+ ""untilStep"": untilStep, -+ -+ // VERY basic arithmetic. 
-+ ""add1"": func(i interface{}) int64 { return toInt64(i) + 1 }, -+ ""add"": func(i ...interface{}) int64 { -+ var a int64 = 0 -+ for _, b := range i { -+ a += toInt64(b) -+ } -+ return a -+ }, -+ ""sub"": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, -+ ""div"": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, -+ ""mod"": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, -+ ""mul"": func(a interface{}, v ...interface{}) int64 { -+ val := toInt64(a) -+ for _, b := range v { -+ val = val * toInt64(b) -+ } -+ return val -+ }, -+ ""randInt"": func(min, max int) int { return rand.Intn(max-min) + min }, -+ ""add1f"": func(i interface{}) float64 { -+ return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) -+ }, -+ ""addf"": func(i ...interface{}) float64 { -+ a := interface{}(float64(0)) -+ return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) -+ }, -+ ""subf"": func(a interface{}, v ...interface{}) float64 { -+ return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) -+ }, -+ ""divf"": func(a interface{}, v ...interface{}) float64 { -+ return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) -+ }, -+ ""mulf"": func(a interface{}, v ...interface{}) float64 { -+ return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) -+ }, -+ ""biggest"": max, -+ ""max"": max, -+ ""min"": min, -+ ""maxf"": maxf, -+ ""minf"": minf, -+ ""ceil"": ceil, -+ ""floor"": floor, -+ ""round"": round, -+ -+ // string slices. Note that we reverse the order b/c that's better -+ // for template processing. -+ ""join"": join, -+ ""sortAlpha"": sortAlpha, -+ -+ // Defaults -+ ""default"": dfault, -+ ""empty"": empty, -+ ""coalesce"": coalesce, -+ ""all"": all, -+ ""any"": any, -+ ""compact"": compact, -+ ""mustCompact"": mustCompact, -+ ""fromJson"": fromJson, -+ ""toJson"": toJson, -+ ""toPrettyJson"": toPrettyJson, -+ ""toRawJson"": toRawJson, -+ ""mustFromJson"": mustFromJson, -+ ""mustToJson"": mustToJson, -+ ""mustToPrettyJson"": mustToPrettyJson, -+ ""mustToRawJson"": mustToRawJson, -+ ""ternary"": ternary, -+ ""deepCopy"": deepCopy, -+ ""mustDeepCopy"": mustDeepCopy, -+ -+ // Reflection -+ ""typeOf"": typeOf, -+ ""typeIs"": typeIs, -+ ""typeIsLike"": typeIsLike, -+ ""kindOf"": kindOf, -+ ""kindIs"": kindIs, -+ ""deepEqual"": reflect.DeepEqual, -+ -+ // OS: -+ ""env"": os.Getenv, -+ ""expandenv"": os.ExpandEnv, -+ -+ // Network: -+ ""getHostByName"": getHostByName, -+ -+ // Paths: -+ ""base"": path.Base, -+ ""dir"": path.Dir, -+ ""clean"": path.Clean, -+ ""ext"": path.Ext, -+ ""isAbs"": path.IsAbs, -+ -+ // Filepaths: -+ ""osBase"": filepath.Base, -+ ""osClean"": filepath.Clean, -+ ""osDir"": filepath.Dir, -+ ""osExt"": filepath.Ext, -+ ""osIsAbs"": filepath.IsAbs, -+ -+ // Encoding: -+ ""b64enc"": base64encode, -+ ""b64dec"": base64decode, -+ ""b32enc"": base32encode, -+ ""b32dec"": base32decode, -+ -+ // Data Structures: -+ ""tuple"": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
-+ ""list"": list, -+ ""dict"": dict, -+ ""get"": get, -+ ""set"": set, -+ ""unset"": unset, -+ ""hasKey"": hasKey, -+ ""pluck"": pluck, -+ ""keys"": keys, -+ ""pick"": pick, -+ ""omit"": omit, -+ ""merge"": merge, -+ ""mergeOverwrite"": mergeOverwrite, -+ ""mustMerge"": mustMerge, -+ ""mustMergeOverwrite"": mustMergeOverwrite, -+ ""values"": values, -+ -+ ""append"": push, ""push"": push, -+ ""mustAppend"": mustPush, ""mustPush"": mustPush, -+ ""prepend"": prepend, -+ ""mustPrepend"": mustPrepend, -+ ""first"": first, -+ ""mustFirst"": mustFirst, -+ ""rest"": rest, -+ ""mustRest"": mustRest, -+ ""last"": last, -+ ""mustLast"": mustLast, -+ ""initial"": initial, -+ ""mustInitial"": mustInitial, -+ ""reverse"": reverse, -+ ""mustReverse"": mustReverse, -+ ""uniq"": uniq, -+ ""mustUniq"": mustUniq, -+ ""without"": without, -+ ""mustWithout"": mustWithout, -+ ""has"": has, -+ ""mustHas"": mustHas, -+ ""slice"": slice, -+ ""mustSlice"": mustSlice, -+ ""concat"": concat, -+ ""dig"": dig, -+ ""chunk"": chunk, -+ ""mustChunk"": mustChunk, -+ -+ // Crypto: -+ ""bcrypt"": bcrypt, -+ ""htpasswd"": htpasswd, -+ ""genPrivateKey"": generatePrivateKey, -+ ""derivePassword"": derivePassword, -+ ""buildCustomCert"": buildCustomCertificate, -+ ""genCA"": generateCertificateAuthority, -+ ""genCAWithKey"": generateCertificateAuthorityWithPEMKey, -+ ""genSelfSignedCert"": generateSelfSignedCertificate, -+ ""genSelfSignedCertWithKey"": generateSelfSignedCertificateWithPEMKey, -+ ""genSignedCert"": generateSignedCertificate, -+ ""genSignedCertWithKey"": generateSignedCertificateWithPEMKey, -+ ""encryptAES"": encryptAES, -+ ""decryptAES"": decryptAES, -+ ""randBytes"": randBytes, -+ -+ // UUIDs: -+ ""uuidv4"": uuidv4, -+ -+ // SemVer: -+ ""semver"": semver, -+ ""semverCompare"": semverCompare, -+ -+ // Flow Control: -+ ""fail"": func(msg string) (string, error) { return """", errors.New(msg) }, -+ -+ // Regex -+ ""regexMatch"": regexMatch, -+ ""mustRegexMatch"": mustRegexMatch, -+ ""regexFindAll"": regexFindAll, -+ ""mustRegexFindAll"": mustRegexFindAll, -+ ""regexFind"": regexFind, -+ ""mustRegexFind"": mustRegexFind, -+ ""regexReplaceAll"": regexReplaceAll, -+ ""mustRegexReplaceAll"": mustRegexReplaceAll, -+ ""regexReplaceAllLiteral"": regexReplaceAllLiteral, -+ ""mustRegexReplaceAllLiteral"": mustRegexReplaceAllLiteral, -+ ""regexSplit"": regexSplit, -+ ""mustRegexSplit"": mustRegexSplit, -+ ""regexQuoteMeta"": regexQuoteMeta, -+ -+ // URLs: -+ ""urlParse"": urlParse, -+ ""urlJoin"": urlJoin, -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/go.mod b/vendor/github.com/Masterminds/sprig/v3/go.mod -new file mode 100644 -index 0000000000000..c2597092ac046 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/go.mod -@@ -0,0 +1,16 @@ -+module github.com/Masterminds/sprig/v3 -+ -+go 1.13 -+ -+require ( -+ github.com/Masterminds/goutils v1.1.1 -+ github.com/Masterminds/semver/v3 v3.1.1 -+ github.com/google/uuid v1.1.1 -+ github.com/huandu/xstrings v1.3.1 -+ github.com/imdario/mergo v0.3.11 -+ github.com/mitchellh/copystructure v1.0.0 -+ github.com/shopspring/decimal v1.2.0 -+ github.com/spf13/cast v1.3.1 -+ github.com/stretchr/testify v1.5.1 -+ golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 -+) -diff --git a/vendor/github.com/Masterminds/sprig/v3/go.sum b/vendor/github.com/Masterminds/sprig/v3/go.sum -new file mode 100644 -index 0000000000000..b0e7f018b03b7 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/go.sum -@@ -0,0 +1,52 @@ -+github.com/Masterminds/goutils v1.1.0 
h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= -+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -+github.com/Masterminds/semver/v3 v3.1.0 h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk= -+github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -+github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -+github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= -+github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -+github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= -+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -+github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= -+github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -+github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= -+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -+github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -+github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
-+golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8= -+golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go -new file mode 100644 -index 0000000000000..ca0fbb7893289 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/list.go -@@ -0,0 +1,464 @@ -+package sprig -+ -+import ( -+ ""fmt"" -+ ""math"" -+ ""reflect"" -+ ""sort"" -+) -+ -+// Reflection is used in these functions so that slices and arrays of strings, -+// ints, and other types not implementing []interface{} can be worked with. -+// For example, this is useful if you need to work on the output of regexs. -+ -+func list(v ...interface{}) []interface{} { -+ return v -+} -+ -+func push(list interface{}, v interface{}) []interface{} { -+ l, err := mustPush(list, v) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustPush(list interface{}, v interface{}) ([]interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ nl := make([]interface{}, l) -+ for i := 0; i < l; i++ { -+ nl[i] = l2.Index(i).Interface() -+ } -+ -+ return append(nl, v), nil -+ -+ default: -+ return nil, fmt.Errorf(""Cannot push on type %s"", tp) -+ } -+} -+ -+func prepend(list interface{}, v interface{}) []interface{} { -+ l, err := mustPrepend(list, v) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { -+ //return append([]interface{}{v}, list...) 
-+ -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ nl := make([]interface{}, l) -+ for i := 0; i < l; i++ { -+ nl[i] = l2.Index(i).Interface() -+ } -+ -+ return append([]interface{}{v}, nl...), nil -+ -+ default: -+ return nil, fmt.Errorf(""Cannot prepend on type %s"", tp) -+ } -+} -+ -+func chunk(size int, list interface{}) [][]interface{} { -+ l, err := mustChunk(size, list) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustChunk(size int, list interface{}) ([][]interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ -+ cs := int(math.Floor(float64(l-1)/float64(size)) + 1) -+ nl := make([][]interface{}, cs) -+ -+ for i := 0; i < cs; i++ { -+ clen := size -+ if i == cs-1 { -+ clen = int(math.Floor(math.Mod(float64(l), float64(size)))) -+ if clen == 0 { -+ clen = size -+ } -+ } -+ -+ nl[i] = make([]interface{}, clen) -+ -+ for j := 0; j < clen; j++ { -+ ix := i*size + j -+ nl[i][j] = l2.Index(ix).Interface() -+ } -+ } -+ -+ return nl, nil -+ -+ default: -+ return nil, fmt.Errorf(""Cannot chunk type %s"", tp) -+ } -+} -+ -+func last(list interface{}) interface{} { -+ l, err := mustLast(list) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustLast(list interface{}) (interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ if l == 0 { -+ return nil, nil -+ } -+ -+ return l2.Index(l - 1).Interface(), nil -+ default: -+ return nil, fmt.Errorf(""Cannot find last on type %s"", tp) -+ } -+} -+ -+func first(list interface{}) interface{} { -+ l, err := mustFirst(list) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustFirst(list interface{}) (interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ if l == 0 { -+ return nil, nil -+ } -+ -+ return l2.Index(0).Interface(), nil -+ default: -+ return nil, fmt.Errorf(""Cannot find first on type %s"", tp) -+ } -+} -+ -+func rest(list interface{}) []interface{} { -+ l, err := mustRest(list) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustRest(list interface{}) ([]interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ if l == 0 { -+ return nil, nil -+ } -+ -+ nl := make([]interface{}, l-1) -+ for i := 1; i < l; i++ { -+ nl[i-1] = l2.Index(i).Interface() -+ } -+ -+ return nl, nil -+ default: -+ return nil, fmt.Errorf(""Cannot find rest on type %s"", tp) -+ } -+} -+ -+func initial(list interface{}) []interface{} { -+ l, err := mustInitial(list) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustInitial(list interface{}) ([]interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ if l == 0 { -+ return nil, nil -+ } -+ -+ nl := make([]interface{}, l-1) -+ for i := 0; i < l-1; i++ { -+ nl[i] = l2.Index(i).Interface() -+ } -+ -+ return nl, nil -+ default: -+ return nil, fmt.Errorf(""Cannot find initial on type %s"", tp) -+ } -+} -+ -+func sortAlpha(list interface{}) []string { -+ k := 
reflect.Indirect(reflect.ValueOf(list)).Kind() -+ switch k { -+ case reflect.Slice, reflect.Array: -+ a := strslice(list) -+ s := sort.StringSlice(a) -+ s.Sort() -+ return s -+ } -+ return []string{strval(list)} -+} -+ -+func reverse(v interface{}) []interface{} { -+ l, err := mustReverse(v) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustReverse(v interface{}) ([]interface{}, error) { -+ tp := reflect.TypeOf(v).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(v) -+ -+ l := l2.Len() -+ // We do not sort in place because the incoming array should not be altered. -+ nl := make([]interface{}, l) -+ for i := 0; i < l; i++ { -+ nl[l-i-1] = l2.Index(i).Interface() -+ } -+ -+ return nl, nil -+ default: -+ return nil, fmt.Errorf(""Cannot find reverse on type %s"", tp) -+ } -+} -+ -+func compact(list interface{}) []interface{} { -+ l, err := mustCompact(list) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustCompact(list interface{}) ([]interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ nl := []interface{}{} -+ var item interface{} -+ for i := 0; i < l; i++ { -+ item = l2.Index(i).Interface() -+ if !empty(item) { -+ nl = append(nl, item) -+ } -+ } -+ -+ return nl, nil -+ default: -+ return nil, fmt.Errorf(""Cannot compact on type %s"", tp) -+ } -+} -+ -+func uniq(list interface{}) []interface{} { -+ l, err := mustUniq(list) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustUniq(list interface{}) ([]interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ dest := []interface{}{} -+ var item interface{} -+ for i := 0; i < l; i++ { -+ item = l2.Index(i).Interface() -+ if !inList(dest, item) { -+ dest = append(dest, item) -+ } -+ } -+ -+ return dest, nil -+ default: -+ return nil, fmt.Errorf(""Cannot find uniq on type %s"", tp) -+ } -+} -+ -+func inList(haystack []interface{}, needle interface{}) bool { -+ for _, h := range haystack { -+ if reflect.DeepEqual(needle, h) { -+ return true -+ } -+ } -+ return false -+} -+ -+func without(list interface{}, omit ...interface{}) []interface{} { -+ l, err := mustWithout(list, omit...) 
-+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ res := []interface{}{} -+ var item interface{} -+ for i := 0; i < l; i++ { -+ item = l2.Index(i).Interface() -+ if !inList(omit, item) { -+ res = append(res, item) -+ } -+ } -+ -+ return res, nil -+ default: -+ return nil, fmt.Errorf(""Cannot find without on type %s"", tp) -+ } -+} -+ -+func has(needle interface{}, haystack interface{}) bool { -+ l, err := mustHas(needle, haystack) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustHas(needle interface{}, haystack interface{}) (bool, error) { -+ if haystack == nil { -+ return false, nil -+ } -+ tp := reflect.TypeOf(haystack).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(haystack) -+ var item interface{} -+ l := l2.Len() -+ for i := 0; i < l; i++ { -+ item = l2.Index(i).Interface() -+ if reflect.DeepEqual(needle, item) { -+ return true, nil -+ } -+ } -+ -+ return false, nil -+ default: -+ return false, fmt.Errorf(""Cannot find has on type %s"", tp) -+ } -+} -+ -+// $list := [1, 2, 3, 4, 5] -+// slice $list -> list[0:5] = list[:] -+// slice $list 0 3 -> list[0:3] = list[:3] -+// slice $list 3 5 -> list[3:5] -+// slice $list 3 -> list[3:5] = list[3:] -+func slice(list interface{}, indices ...interface{}) interface{} { -+ l, err := mustSlice(list, indices...) -+ if err != nil { -+ panic(err) -+ } -+ -+ return l -+} -+ -+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ -+ l := l2.Len() -+ if l == 0 { -+ return nil, nil -+ } -+ -+ var start, end int -+ if len(indices) > 0 { -+ start = toInt(indices[0]) -+ } -+ if len(indices) < 2 { -+ end = l -+ } else { -+ end = toInt(indices[1]) -+ } -+ -+ return l2.Slice(start, end).Interface(), nil -+ default: -+ return nil, fmt.Errorf(""list should be type of slice or array but %s"", tp) -+ } -+} -+ -+func concat(lists ...interface{}) interface{} { -+ var res []interface{} -+ for _, list := range lists { -+ tp := reflect.TypeOf(list).Kind() -+ switch tp { -+ case reflect.Slice, reflect.Array: -+ l2 := reflect.ValueOf(list) -+ for i := 0; i < l2.Len(); i++ { -+ res = append(res, l2.Index(i).Interface()) -+ } -+ default: -+ panic(fmt.Sprintf(""Cannot concat type %s as list"", tp)) -+ } -+ } -+ return res -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go -new file mode 100644 -index 0000000000000..108d78a94627c ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/network.go -@@ -0,0 +1,12 @@ -+package sprig -+ -+import ( -+ ""math/rand"" -+ ""net"" -+) -+ -+func getHostByName(name string) string { -+ addrs, _ := net.LookupHost(name) -+ //TODO: add error handing when release v3 comes out -+ return addrs[rand.Intn(len(addrs))] -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go -new file mode 100644 -index 0000000000000..f68e4182ee60e ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go -@@ -0,0 +1,186 @@ -+package sprig -+ -+import ( -+ ""fmt"" -+ ""math"" -+ ""strconv"" -+ ""strings"" -+ -+ ""github.com/spf13/cast"" -+ ""github.com/shopspring/decimal"" -+) 
-+ -+// toFloat64 converts 64-bit floats -+func toFloat64(v interface{}) float64 { -+ return cast.ToFloat64(v) -+} -+ -+func toInt(v interface{}) int { -+ return cast.ToInt(v) -+} -+ -+// toInt64 converts integer types to 64-bit integers -+func toInt64(v interface{}) int64 { -+ return cast.ToInt64(v) -+} -+ -+func max(a interface{}, i ...interface{}) int64 { -+ aa := toInt64(a) -+ for _, b := range i { -+ bb := toInt64(b) -+ if bb > aa { -+ aa = bb -+ } -+ } -+ return aa -+} -+ -+func maxf(a interface{}, i ...interface{}) float64 { -+ aa := toFloat64(a) -+ for _, b := range i { -+ bb := toFloat64(b) -+ aa = math.Max(aa, bb) -+ } -+ return aa -+} -+ -+func min(a interface{}, i ...interface{}) int64 { -+ aa := toInt64(a) -+ for _, b := range i { -+ bb := toInt64(b) -+ if bb < aa { -+ aa = bb -+ } -+ } -+ return aa -+} -+ -+func minf(a interface{}, i ...interface{}) float64 { -+ aa := toFloat64(a) -+ for _, b := range i { -+ bb := toFloat64(b) -+ aa = math.Min(aa, bb) -+ } -+ return aa -+} -+ -+func until(count int) []int { -+ step := 1 -+ if count < 0 { -+ step = -1 -+ } -+ return untilStep(0, count, step) -+} -+ -+func untilStep(start, stop, step int) []int { -+ v := []int{} -+ -+ if stop < start { -+ if step >= 0 { -+ return v -+ } -+ for i := start; i > stop; i += step { -+ v = append(v, i) -+ } -+ return v -+ } -+ -+ if step <= 0 { -+ return v -+ } -+ for i := start; i < stop; i += step { -+ v = append(v, i) -+ } -+ return v -+} -+ -+func floor(a interface{}) float64 { -+ aa := toFloat64(a) -+ return math.Floor(aa) -+} -+ -+func ceil(a interface{}) float64 { -+ aa := toFloat64(a) -+ return math.Ceil(aa) -+} -+ -+func round(a interface{}, p int, rOpt ...float64) float64 { -+ roundOn := .5 -+ if len(rOpt) > 0 { -+ roundOn = rOpt[0] -+ } -+ val := toFloat64(a) -+ places := toFloat64(p) -+ -+ var round float64 -+ pow := math.Pow(10, places) -+ digit := pow * val -+ _, div := math.Modf(digit) -+ if div >= roundOn { -+ round = math.Ceil(digit) -+ } else { -+ round = math.Floor(digit) -+ } -+ return round / pow -+} -+ -+// converts unix octal to decimal -+func toDecimal(v interface{}) int64 { -+ result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) -+ if err != nil { -+ return 0 -+ } -+ return result -+} -+ -+func seq(params ...int) string { -+ increment := 1 -+ switch len(params) { -+ case 0: -+ return """" -+ case 1: -+ start := 1 -+ end := params[0] -+ if end < start { -+ increment = -1 -+ } -+ return intArrayToString(untilStep(start, end+increment, increment), "" "") -+ case 3: -+ start := params[0] -+ end := params[2] -+ step := params[1] -+ if end < start { -+ increment = -1 -+ if step > 0 { -+ return """" -+ } -+ } -+ return intArrayToString(untilStep(start, end+increment, step), "" "") -+ case 2: -+ start := params[0] -+ end := params[1] -+ step := 1 -+ if end < start { -+ step = -1 -+ } -+ return intArrayToString(untilStep(start, end+step, step), "" "") -+ default: -+ return """" -+ } -+} -+ -+func intArrayToString(slice []int, delimeter string) string { -+ return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), ""[]"") -+} -+ -+// performs a float and subsequent decimal.Decimal conversion on inputs, -+// and iterates through a and b executing the mathmetical operation f -+func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { -+ prt := decimal.NewFromFloat(toFloat64(a)) -+ for _, x := range b { -+ dx := decimal.NewFromFloat(toFloat64(x)) -+ prt = f(prt, dx) -+ } -+ rslt, _ := prt.Float64() -+ return rslt -+} 
-diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go -new file mode 100644 -index 0000000000000..8a65c132f08f2 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go -@@ -0,0 +1,28 @@ -+package sprig -+ -+import ( -+ ""fmt"" -+ ""reflect"" -+) -+ -+// typeIs returns true if the src is the type named in target. -+func typeIs(target string, src interface{}) bool { -+ return target == typeOf(src) -+} -+ -+func typeIsLike(target string, src interface{}) bool { -+ t := typeOf(src) -+ return target == t || ""*""+target == t -+} -+ -+func typeOf(src interface{}) string { -+ return fmt.Sprintf(""%T"", src) -+} -+ -+func kindIs(target string, src interface{}) bool { -+ return target == kindOf(src) -+} -+ -+func kindOf(src interface{}) string { -+ return reflect.ValueOf(src).Kind().String() -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go -new file mode 100644 -index 0000000000000..fab5510189775 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/regex.go -@@ -0,0 +1,83 @@ -+package sprig -+ -+import ( -+ ""regexp"" -+) -+ -+func regexMatch(regex string, s string) bool { -+ match, _ := regexp.MatchString(regex, s) -+ return match -+} -+ -+func mustRegexMatch(regex string, s string) (bool, error) { -+ return regexp.MatchString(regex, s) -+} -+ -+func regexFindAll(regex string, s string, n int) []string { -+ r := regexp.MustCompile(regex) -+ return r.FindAllString(s, n) -+} -+ -+func mustRegexFindAll(regex string, s string, n int) ([]string, error) { -+ r, err := regexp.Compile(regex) -+ if err != nil { -+ return []string{}, err -+ } -+ return r.FindAllString(s, n), nil -+} -+ -+func regexFind(regex string, s string) string { -+ r := regexp.MustCompile(regex) -+ return r.FindString(s) -+} -+ -+func mustRegexFind(regex string, s string) (string, error) { -+ r, err := regexp.Compile(regex) -+ if err != nil { -+ return """", err -+ } -+ return r.FindString(s), nil -+} -+ -+func regexReplaceAll(regex string, s string, repl string) string { -+ r := regexp.MustCompile(regex) -+ return r.ReplaceAllString(s, repl) -+} -+ -+func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { -+ r, err := regexp.Compile(regex) -+ if err != nil { -+ return """", err -+ } -+ return r.ReplaceAllString(s, repl), nil -+} -+ -+func regexReplaceAllLiteral(regex string, s string, repl string) string { -+ r := regexp.MustCompile(regex) -+ return r.ReplaceAllLiteralString(s, repl) -+} -+ -+func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { -+ r, err := regexp.Compile(regex) -+ if err != nil { -+ return """", err -+ } -+ return r.ReplaceAllLiteralString(s, repl), nil -+} -+ -+func regexSplit(regex string, s string, n int) []string { -+ r := regexp.MustCompile(regex) -+ return r.Split(s, n) -+} -+ -+func mustRegexSplit(regex string, s string, n int) ([]string, error) { -+ r, err := regexp.Compile(regex) -+ if err != nil { -+ return []string{}, err -+ } -+ return r.Split(s, n), nil -+} -+ -+func regexQuoteMeta(s string) string { -+ return regexp.QuoteMeta(s) -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go -new file mode 100644 -index 0000000000000..3fbe08aa63762 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/semver.go -@@ -0,0 +1,23 @@ -+package sprig -+ -+import ( -+ sv2 ""github.com/Masterminds/semver/v3"" -+) -+ -+func 
semverCompare(constraint, version string) (bool, error) { -+ c, err := sv2.NewConstraint(constraint) -+ if err != nil { -+ return false, err -+ } -+ -+ v, err := sv2.NewVersion(version) -+ if err != nil { -+ return false, err -+ } -+ -+ return c.Check(v), nil -+} -+ -+func semver(version string) (*sv2.Version, error) { -+ return sv2.NewVersion(version) -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go -new file mode 100644 -index 0000000000000..e0ae628c8417d ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/strings.go -@@ -0,0 +1,236 @@ -+package sprig -+ -+import ( -+ ""encoding/base32"" -+ ""encoding/base64"" -+ ""fmt"" -+ ""reflect"" -+ ""strconv"" -+ ""strings"" -+ -+ util ""github.com/Masterminds/goutils"" -+) -+ -+func base64encode(v string) string { -+ return base64.StdEncoding.EncodeToString([]byte(v)) -+} -+ -+func base64decode(v string) string { -+ data, err := base64.StdEncoding.DecodeString(v) -+ if err != nil { -+ return err.Error() -+ } -+ return string(data) -+} -+ -+func base32encode(v string) string { -+ return base32.StdEncoding.EncodeToString([]byte(v)) -+} -+ -+func base32decode(v string) string { -+ data, err := base32.StdEncoding.DecodeString(v) -+ if err != nil { -+ return err.Error() -+ } -+ return string(data) -+} -+ -+func abbrev(width int, s string) string { -+ if width < 4 { -+ return s -+ } -+ r, _ := util.Abbreviate(s, width) -+ return r -+} -+ -+func abbrevboth(left, right int, s string) string { -+ if right < 4 || left > 0 && right < 7 { -+ return s -+ } -+ r, _ := util.AbbreviateFull(s, left, right) -+ return r -+} -+func initials(s string) string { -+ // Wrap this just to eliminate the var args, which templates don't do well. -+ return util.Initials(s) -+} -+ -+func randAlphaNumeric(count int) string { -+ // It is not possible, it appears, to actually generate an error here. -+ r, _ := util.CryptoRandomAlphaNumeric(count) -+ return r -+} -+ -+func randAlpha(count int) string { -+ r, _ := util.CryptoRandomAlphabetic(count) -+ return r -+} -+ -+func randAscii(count int) string { -+ r, _ := util.CryptoRandomAscii(count) -+ return r -+} -+ -+func randNumeric(count int) string { -+ r, _ := util.CryptoRandomNumeric(count) -+ return r -+} -+ -+func untitle(str string) string { -+ return util.Uncapitalize(str) -+} -+ -+func quote(str ...interface{}) string { -+ out := make([]string, 0, len(str)) -+ for _, s := range str { -+ if s != nil { -+ out = append(out, fmt.Sprintf(""%q"", strval(s))) -+ } -+ } -+ return strings.Join(out, "" "") -+} -+ -+func squote(str ...interface{}) string { -+ out := make([]string, 0, len(str)) -+ for _, s := range str { -+ if s != nil { -+ out = append(out, fmt.Sprintf(""'%v'"", s)) -+ } -+ } -+ return strings.Join(out, "" "") -+} -+ -+func cat(v ...interface{}) string { -+ v = removeNilElements(v) -+ r := strings.TrimSpace(strings.Repeat(""%v "", len(v))) -+ return fmt.Sprintf(r, v...) 
-+} -+ -+func indent(spaces int, v string) string { -+ pad := strings.Repeat("" "", spaces) -+ return pad + strings.Replace(v, ""\n"", ""\n""+pad, -1) -+} -+ -+func nindent(spaces int, v string) string { -+ return ""\n"" + indent(spaces, v) -+} -+ -+func replace(old, new, src string) string { -+ return strings.Replace(src, old, new, -1) -+} -+ -+func plural(one, many string, count int) string { -+ if count == 1 { -+ return one -+ } -+ return many -+} -+ -+func strslice(v interface{}) []string { -+ switch v := v.(type) { -+ case []string: -+ return v -+ case []interface{}: -+ b := make([]string, 0, len(v)) -+ for _, s := range v { -+ if s != nil { -+ b = append(b, strval(s)) -+ } -+ } -+ return b -+ default: -+ val := reflect.ValueOf(v) -+ switch val.Kind() { -+ case reflect.Array, reflect.Slice: -+ l := val.Len() -+ b := make([]string, 0, l) -+ for i := 0; i < l; i++ { -+ value := val.Index(i).Interface() -+ if value != nil { -+ b = append(b, strval(value)) -+ } -+ } -+ return b -+ default: -+ if v == nil { -+ return []string{} -+ } -+ -+ return []string{strval(v)} -+ } -+ } -+} -+ -+func removeNilElements(v []interface{}) []interface{} { -+ newSlice := make([]interface{}, 0, len(v)) -+ for _, i := range v { -+ if i != nil { -+ newSlice = append(newSlice, i) -+ } -+ } -+ return newSlice -+} -+ -+func strval(v interface{}) string { -+ switch v := v.(type) { -+ case string: -+ return v -+ case []byte: -+ return string(v) -+ case error: -+ return v.Error() -+ case fmt.Stringer: -+ return v.String() -+ default: -+ return fmt.Sprintf(""%v"", v) -+ } -+} -+ -+func trunc(c int, s string) string { -+ if c < 0 && len(s)+c > 0 { -+ return s[len(s)+c:] -+ } -+ if c >= 0 && len(s) > c { -+ return s[:c] -+ } -+ return s -+} -+ -+func join(sep string, v interface{}) string { -+ return strings.Join(strslice(v), sep) -+} -+ -+func split(sep, orig string) map[string]string { -+ parts := strings.Split(orig, sep) -+ res := make(map[string]string, len(parts)) -+ for i, v := range parts { -+ res[""_""+strconv.Itoa(i)] = v -+ } -+ return res -+} -+ -+func splitn(sep string, n int, orig string) map[string]string { -+ parts := strings.SplitN(orig, sep, n) -+ res := make(map[string]string, len(parts)) -+ for i, v := range parts { -+ res[""_""+strconv.Itoa(i)] = v -+ } -+ return res -+} -+ -+// substring creates a substring of the given string. -+// -+// If start is < 0, this calls string[:end]. -+// -+// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] -+// -+// Otherwise, this calls string[start, end]. 
-+func substring(start, end int, s string) string { -+ if start < 0 { -+ return s[:end] -+ } -+ if end < 0 || end > len(s) { -+ return s[start:] -+ } -+ return s[start:end] -+} -diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go -new file mode 100644 -index 0000000000000..b8e120e19ba40 ---- /dev/null -+++ b/vendor/github.com/Masterminds/sprig/v3/url.go -@@ -0,0 +1,66 @@ -+package sprig -+ -+import ( -+ ""fmt"" -+ ""net/url"" -+ ""reflect"" -+) -+ -+func dictGetOrEmpty(dict map[string]interface{}, key string) string { -+ value, ok := dict[key] -+ if !ok { -+ return """" -+ } -+ tp := reflect.TypeOf(value).Kind() -+ if tp != reflect.String { -+ panic(fmt.Sprintf(""unable to parse %s key, must be of type string, but %s found"", key, tp.String())) -+ } -+ return reflect.ValueOf(value).String() -+} -+ -+// parses given URL to return dict object -+func urlParse(v string) map[string]interface{} { -+ dict := map[string]interface{}{} -+ parsedURL, err := url.Parse(v) -+ if err != nil { -+ panic(fmt.Sprintf(""unable to parse url: %s"", err)) -+ } -+ dict[""scheme""] = parsedURL.Scheme -+ dict[""host""] = parsedURL.Host -+ dict[""hostname""] = parsedURL.Hostname() -+ dict[""path""] = parsedURL.Path -+ dict[""query""] = parsedURL.RawQuery -+ dict[""opaque""] = parsedURL.Opaque -+ dict[""fragment""] = parsedURL.Fragment -+ if parsedURL.User != nil { -+ dict[""userinfo""] = parsedURL.User.String() -+ } else { -+ dict[""userinfo""] = """" -+ } -+ -+ return dict -+} -+ -+// join given dict to URL string -+func urlJoin(d map[string]interface{}) string { -+ resURL := url.URL{ -+ Scheme: dictGetOrEmpty(d, ""scheme""), -+ Host: dictGetOrEmpty(d, ""host""), -+ Path: dictGetOrEmpty(d, ""path""), -+ RawQuery: dictGetOrEmpty(d, ""query""), -+ Opaque: dictGetOrEmpty(d, ""opaque""), -+ Fragment: dictGetOrEmpty(d, ""fragment""), -+ } -+ userinfo := dictGetOrEmpty(d, ""userinfo"") -+ var user *url.Userinfo -+ if userinfo != """" { -+ tempURL, err := url.Parse(fmt.Sprintf(""proto://%s@host"", userinfo)) -+ if err != nil { -+ panic(fmt.Sprintf(""unable to parse userinfo in dict: %s"", err)) -+ } -+ user = tempURL.User -+ } -+ -+ resURL.User = user -+ return resURL.String() -+} -diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore -new file mode 100644 -index 0000000000000..daf913b1b347a ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/.gitignore -@@ -0,0 +1,24 @@ -+# Compiled Object files, Static and Dynamic libs (Shared Objects) -+*.o -+*.a -+*.so -+ -+# Folders -+_obj -+_test -+ -+# Architecture specific extensions/prefixes -+*.[568vq] -+[568vq].out -+ -+*.cgo1.go -+*.cgo2.c -+_cgo_defun.c -+_cgo_gotypes.go -+_cgo_export.* -+ -+_testmain.go -+ -+*.exe -+*.test -+*.prof -diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml -new file mode 100644 -index 0000000000000..d6460be411e57 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/.travis.yml -@@ -0,0 +1,7 @@ -+language: go -+install: -+ - go get golang.org/x/tools/cmd/cover -+ - go get github.com/mattn/goveralls -+script: -+ - go test -v -covermode=count -coverprofile=coverage.out -+ - 'if [ ""$TRAVIS_PULL_REQUEST"" = ""false"" ] && [ ! 
-z ""$COVERALLS_TOKEN"" ]; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi' -diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md -new file mode 100644 -index 0000000000000..d7b4b8d584b77 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md -@@ -0,0 +1,23 @@ -+# Contributing # -+ -+Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. -+ -+## New API or feature ## -+ -+I want to speak more about how to add new functions to this package. -+ -+Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. -+ -+* Rule 1: Only string algorithm, which takes string as input, can be included. -+* Rule 2: If a function has been implemented in package `string`, it must not be included. -+* Rule 3: If a function is not language neutral, it must not be included. -+* Rule 4: If a function is a part of standard library in other languages, it can be included. -+* Rule 5: If a function is quite useful in some famous framework or library, it can be included. -+ -+New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. -+ -+## Pull request ## -+ -+Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. -+ -+If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. -diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE -new file mode 100644 -index 0000000000000..270177259365d ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/LICENSE -@@ -0,0 +1,22 @@ -+The MIT License (MIT) -+ -+Copyright (c) 2015 Huan Du -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the ""Software""), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in all -+copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -+SOFTWARE. 
-+ -diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md -new file mode 100644 -index 0000000000000..292bf2f39e13d ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/README.md -@@ -0,0 +1,117 @@ -+# xstrings # -+ -+[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) -+[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) -+[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) -+[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) -+ -+Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). -+ -+All functions are well tested and carefully tuned for performance. -+ -+## Propose a new function ## -+ -+Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. -+ -+## Install ## -+ -+Use `go get` to install this library. -+ -+ go get github.com/huandu/xstrings -+ -+## API document ## -+ -+See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. -+ -+## Function list ## -+ -+Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. -+ -+Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. 
-+ -+### Package `xstrings` functions ### -+ -+*Keep this table sorted by Function in ascending order.* -+ -+| Function | Friends | # | -+| -------- | ------- | --- | -+| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | -+| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | -+| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | -+| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -+| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -+| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -+| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | -+| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | -+| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | -+| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | -+| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | -+| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | -+| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | -+| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | -+| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -+| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -+| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -+| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | -+| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | -+| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | -+| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | 
[#12](https://github.com/huandu/xstrings/issues/12) | -+| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -+| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | -+| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -+| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | -+| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | -+| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | -+| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | -+ -+### Package `strings` functions ### -+ -+*Keep this table sorted by Function in ascending order.* -+ -+| Function | Friends | -+| -------- | ------- | -+| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | -+| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | -+| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | -+| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | -+| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | -+| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | -+| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | -+| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | -+| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | -+| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | -+| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | -+| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | -+| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | -+| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | -+| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | -+| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | -+| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | -+| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | -+| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | -+| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | -+| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | -+| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -+| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | -+| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | -+| 
[SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -+| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | -+| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | -+| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | -+| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | -+| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | -+| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | -+| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | -+| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -+| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | -+| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | -+| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | -+| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | -+| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | -+| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | -+| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -+| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | -+ -+## License ## -+ -+This library is licensed under MIT license. See LICENSE for details. -diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go -new file mode 100644 -index 0000000000000..2aff57aab4d62 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/common.go -@@ -0,0 +1,25 @@ -+// Copyright 2015 Huan Du. All rights reserved. -+// Licensed under the MIT license that can be found in the LICENSE file. -+ -+package xstrings -+ -+import ( -+ ""bytes"" -+) -+ -+const bufferMaxInitGrowSize = 2048 -+ -+// Lazy initialize a buffer. -+func allocBuffer(orig, cur string) *bytes.Buffer { -+ output := &bytes.Buffer{} -+ maxSize := len(orig) * 4 -+ -+ // Avoid to reserve too much memory at once. -+ if maxSize > bufferMaxInitGrowSize { -+ maxSize = bufferMaxInitGrowSize -+ } -+ -+ output.Grow(maxSize) -+ output.WriteString(orig[:len(orig)-len(cur)]) -+ return output -+} -diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go -new file mode 100644 -index 0000000000000..3d58fa81ae0e1 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/convert.go -@@ -0,0 +1,591 @@ -+// Copyright 2015 Huan Du. All rights reserved. -+// Licensed under the MIT license that can be found in the LICENSE file. -+ -+package xstrings -+ -+import ( -+ ""bytes"" -+ ""math/rand"" -+ ""unicode"" -+ ""unicode/utf8"" -+) -+ -+// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. -+// -+// Some samples. -+// ""some_words"" => ""SomeWords"" -+// ""http_server"" => ""HttpServer"" -+// ""no_https"" => ""NoHttps"" -+// ""_complex__case_"" => ""_Complex_Case_"" -+// ""some words"" => ""SomeWords"" -+func ToCamelCase(str string) string { -+ if len(str) == 0 { -+ return """" -+ } -+ -+ buf := &bytes.Buffer{} -+ var r0, r1 rune -+ var size int -+ -+ // leading connector will appear in output. 
-+ for len(str) > 0 { -+ r0, size = utf8.DecodeRuneInString(str) -+ str = str[size:] -+ -+ if !isConnector(r0) { -+ r0 = unicode.ToUpper(r0) -+ break -+ } -+ -+ buf.WriteRune(r0) -+ } -+ -+ if len(str) == 0 { -+ // A special case for a string contains only 1 rune. -+ if size != 0 { -+ buf.WriteRune(r0) -+ } -+ -+ return buf.String() -+ } -+ -+ for len(str) > 0 { -+ r1 = r0 -+ r0, size = utf8.DecodeRuneInString(str) -+ str = str[size:] -+ -+ if isConnector(r0) && isConnector(r1) { -+ buf.WriteRune(r1) -+ continue -+ } -+ -+ if isConnector(r1) { -+ r0 = unicode.ToUpper(r0) -+ } else { -+ r0 = unicode.ToLower(r0) -+ buf.WriteRune(r1) -+ } -+ } -+ -+ buf.WriteRune(r0) -+ return buf.String() -+} -+ -+// ToSnakeCase can convert all upper case characters in a string to -+// snake case format. -+// -+// Some samples. -+// ""FirstName"" => ""first_name"" -+// ""HTTPServer"" => ""http_server"" -+// ""NoHTTPS"" => ""no_https"" -+// ""GO_PATH"" => ""go_path"" -+// ""GO PATH"" => ""go_path"" // space is converted to underscore. -+// ""GO-PATH"" => ""go_path"" // hyphen is converted to underscore. -+// ""http2xx"" => ""http_2xx"" // insert an underscore before a number and after an alphabet. -+// ""HTTP20xOK"" => ""http_20x_ok"" -+// ""Duration2m3s"" => ""duration_2m3s"" -+// ""Bld4Floor3rd"" => ""bld4_floor_3rd"" -+func ToSnakeCase(str string) string { -+ return camelCaseToLowerCase(str, '_') -+} -+ -+// ToKebabCase can convert all upper case characters in a string to -+// kebab case format. -+// -+// Some samples. -+// ""FirstName"" => ""first-name"" -+// ""HTTPServer"" => ""http-server"" -+// ""NoHTTPS"" => ""no-https"" -+// ""GO_PATH"" => ""go-path"" -+// ""GO PATH"" => ""go-path"" // space is converted to '-'. -+// ""GO-PATH"" => ""go-path"" // hyphen is converted to '-'. -+// ""http2xx"" => ""http-2xx"" // insert an underscore before a number and after an alphabet. -+// ""HTTP20xOK"" => ""http-20x-ok"" -+// ""Duration2m3s"" => ""duration-2m3s"" -+// ""Bld4Floor3rd"" => ""bld4-floor-3rd"" -+func ToKebabCase(str string) string { -+ return camelCaseToLowerCase(str, '-') -+} -+ -+func camelCaseToLowerCase(str string, connector rune) string { -+ if len(str) == 0 { -+ return """" -+ } -+ -+ buf := &bytes.Buffer{} -+ wt, word, remaining := nextWord(str) -+ -+ for len(remaining) > 0 { -+ if wt != connectorWord { -+ toLower(buf, wt, word, connector) -+ } -+ -+ prev := wt -+ last := word -+ wt, word, remaining = nextWord(remaining) -+ -+ switch prev { -+ case numberWord: -+ for wt == alphabetWord || wt == numberWord { -+ toLower(buf, wt, word, connector) -+ wt, word, remaining = nextWord(remaining) -+ } -+ -+ if wt != invalidWord && wt != punctWord { -+ buf.WriteRune(connector) -+ } -+ -+ case connectorWord: -+ toLower(buf, prev, last, connector) -+ -+ case punctWord: -+ // nothing. -+ -+ default: -+ if wt != numberWord { -+ if wt != connectorWord && wt != punctWord { -+ buf.WriteRune(connector) -+ } -+ -+ break -+ } -+ -+ if len(remaining) == 0 { -+ break -+ } -+ -+ last := word -+ wt, word, remaining = nextWord(remaining) -+ -+ // consider number as a part of previous word. -+ // e.g. ""Bld4Floor"" => ""bld4_floor"" -+ if wt != alphabetWord { -+ toLower(buf, numberWord, last, connector) -+ -+ if wt != connectorWord && wt != punctWord { -+ buf.WriteRune(connector) -+ } -+ -+ break -+ } -+ -+ // if there are some lower case letters following a number, -+ // add connector before the number. -+ // e.g. 
""HTTP2xx"" => ""http_2xx"" -+ buf.WriteRune(connector) -+ toLower(buf, numberWord, last, connector) -+ -+ for wt == alphabetWord || wt == numberWord { -+ toLower(buf, wt, word, connector) -+ wt, word, remaining = nextWord(remaining) -+ } -+ -+ if wt != invalidWord && wt != connectorWord && wt != punctWord { -+ buf.WriteRune(connector) -+ } -+ } -+ } -+ -+ toLower(buf, wt, word, connector) -+ return buf.String() -+} -+ -+func isConnector(r rune) bool { -+ return r == '-' || r == '_' || unicode.IsSpace(r) -+} -+ -+type wordType int -+ -+const ( -+ invalidWord wordType = iota -+ numberWord -+ upperCaseWord -+ alphabetWord -+ connectorWord -+ punctWord -+ otherWord -+) -+ -+func nextWord(str string) (wt wordType, word, remaining string) { -+ if len(str) == 0 { -+ return -+ } -+ -+ var offset int -+ remaining = str -+ r, size := nextValidRune(remaining, utf8.RuneError) -+ offset += size -+ -+ if r == utf8.RuneError { -+ wt = invalidWord -+ word = str[:offset] -+ remaining = str[offset:] -+ return -+ } -+ -+ switch { -+ case isConnector(r): -+ wt = connectorWord -+ remaining = remaining[size:] -+ -+ for len(remaining) > 0 { -+ r, size = nextValidRune(remaining, r) -+ -+ if !isConnector(r) { -+ break -+ } -+ -+ offset += size -+ remaining = remaining[size:] -+ } -+ -+ case unicode.IsPunct(r): -+ wt = punctWord -+ remaining = remaining[size:] -+ -+ for len(remaining) > 0 { -+ r, size = nextValidRune(remaining, r) -+ -+ if !unicode.IsPunct(r) { -+ break -+ } -+ -+ offset += size -+ remaining = remaining[size:] -+ } -+ -+ case unicode.IsUpper(r): -+ wt = upperCaseWord -+ remaining = remaining[size:] -+ -+ if len(remaining) == 0 { -+ break -+ } -+ -+ r, size = nextValidRune(remaining, r) -+ -+ switch { -+ case unicode.IsUpper(r): -+ prevSize := size -+ offset += size -+ remaining = remaining[size:] -+ -+ for len(remaining) > 0 { -+ r, size = nextValidRune(remaining, r) -+ -+ if !unicode.IsUpper(r) { -+ break -+ } -+ -+ prevSize = size -+ offset += size -+ remaining = remaining[size:] -+ } -+ -+ // it's a bit complex when dealing with a case like ""HTTPStatus"". -+ // it's expected to be splitted into ""HTTP"" and ""Status"". -+ // Therefore ""S"" should be in remaining instead of word. 
-+ if len(remaining) > 0 && isAlphabet(r) { -+ offset -= prevSize -+ remaining = str[offset:] -+ } -+ -+ case isAlphabet(r): -+ offset += size -+ remaining = remaining[size:] -+ -+ for len(remaining) > 0 { -+ r, size = nextValidRune(remaining, r) -+ -+ if !isAlphabet(r) || unicode.IsUpper(r) { -+ break -+ } -+ -+ offset += size -+ remaining = remaining[size:] -+ } -+ } -+ -+ case isAlphabet(r): -+ wt = alphabetWord -+ remaining = remaining[size:] -+ -+ for len(remaining) > 0 { -+ r, size = nextValidRune(remaining, r) -+ -+ if !isAlphabet(r) || unicode.IsUpper(r) { -+ break -+ } -+ -+ offset += size -+ remaining = remaining[size:] -+ } -+ -+ case unicode.IsNumber(r): -+ wt = numberWord -+ remaining = remaining[size:] -+ -+ for len(remaining) > 0 { -+ r, size = nextValidRune(remaining, r) -+ -+ if !unicode.IsNumber(r) { -+ break -+ } -+ -+ offset += size -+ remaining = remaining[size:] -+ } -+ -+ default: -+ wt = otherWord -+ remaining = remaining[size:] -+ -+ for len(remaining) > 0 { -+ r, size = nextValidRune(remaining, r) -+ -+ if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { -+ break -+ } -+ -+ offset += size -+ remaining = remaining[size:] -+ } -+ } -+ -+ word = str[:offset] -+ return -+} -+ -+func nextValidRune(str string, prev rune) (r rune, size int) { -+ var sz int -+ -+ for len(str) > 0 { -+ r, sz = utf8.DecodeRuneInString(str) -+ size += sz -+ -+ if r != utf8.RuneError { -+ return -+ } -+ -+ str = str[sz:] -+ } -+ -+ r = prev -+ return -+} -+ -+func toLower(buf *bytes.Buffer, wt wordType, str string, connector rune) { -+ buf.Grow(buf.Len() + len(str)) -+ -+ if wt != upperCaseWord && wt != connectorWord { -+ buf.WriteString(str) -+ return -+ } -+ -+ for len(str) > 0 { -+ r, size := utf8.DecodeRuneInString(str) -+ str = str[size:] -+ -+ if isConnector(r) { -+ buf.WriteRune(connector) -+ } else if unicode.IsUpper(r) { -+ buf.WriteRune(unicode.ToLower(r)) -+ } else { -+ buf.WriteRune(r) -+ } -+ } -+} -+ -+// SwapCase will swap characters case from upper to lower or lower to upper. -+func SwapCase(str string) string { -+ var r rune -+ var size int -+ -+ buf := &bytes.Buffer{} -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ -+ switch { -+ case unicode.IsUpper(r): -+ buf.WriteRune(unicode.ToLower(r)) -+ -+ case unicode.IsLower(r): -+ buf.WriteRune(unicode.ToUpper(r)) -+ -+ default: -+ buf.WriteRune(r) -+ } -+ -+ str = str[size:] -+ } -+ -+ return buf.String() -+} -+ -+// FirstRuneToUpper converts first rune to upper case if necessary. -+func FirstRuneToUpper(str string) string { -+ if str == """" { -+ return str -+ } -+ -+ r, size := utf8.DecodeRuneInString(str) -+ -+ if !unicode.IsLower(r) { -+ return str -+ } -+ -+ buf := &bytes.Buffer{} -+ buf.WriteRune(unicode.ToUpper(r)) -+ buf.WriteString(str[size:]) -+ return buf.String() -+} -+ -+// FirstRuneToLower converts first rune to lower case if necessary. -+func FirstRuneToLower(str string) string { -+ if str == """" { -+ return str -+ } -+ -+ r, size := utf8.DecodeRuneInString(str) -+ -+ if !unicode.IsUpper(r) { -+ return str -+ } -+ -+ buf := &bytes.Buffer{} -+ buf.WriteRune(unicode.ToLower(r)) -+ buf.WriteString(str[size:]) -+ return buf.String() -+} -+ -+// Shuffle randomizes runes in a string and returns the result. -+// It uses default random source in `math/rand`. 
-+func Shuffle(str string) string { -+ if str == """" { -+ return str -+ } -+ -+ runes := []rune(str) -+ index := 0 -+ -+ for i := len(runes) - 1; i > 0; i-- { -+ index = rand.Intn(i + 1) -+ -+ if i != index { -+ runes[i], runes[index] = runes[index], runes[i] -+ } -+ } -+ -+ return string(runes) -+} -+ -+// ShuffleSource randomizes runes in a string with given random source. -+func ShuffleSource(str string, src rand.Source) string { -+ if str == """" { -+ return str -+ } -+ -+ runes := []rune(str) -+ index := 0 -+ r := rand.New(src) -+ -+ for i := len(runes) - 1; i > 0; i-- { -+ index = r.Intn(i + 1) -+ -+ if i != index { -+ runes[i], runes[index] = runes[index], runes[i] -+ } -+ } -+ -+ return string(runes) -+} -+ -+// Successor returns the successor to string. -+// -+// If there is one alphanumeric rune is found in string, increase the rune by 1. -+// If increment generates a ""carry"", the rune to the left of it is incremented. -+// This process repeats until there is no carry, adding an additional rune if necessary. -+// -+// If there is no alphanumeric rune, the rightmost rune will be increased by 1 -+// regardless whether the result is a valid rune or not. -+// -+// Only following characters are alphanumeric. -+// * a - z -+// * A - Z -+// * 0 - 9 -+// -+// Samples (borrowed from ruby's String#succ document): -+// ""abcd"" => ""abce"" -+// ""THX1138"" => ""THX1139"" -+// ""<>"" => ""<>"" -+// ""1999zzz"" => ""2000aaa"" -+// ""ZZZ9999"" => ""AAAA0000"" -+// ""***"" => ""**+"" -+func Successor(str string) string { -+ if str == """" { -+ return str -+ } -+ -+ var r rune -+ var i int -+ carry := ' ' -+ runes := []rune(str) -+ l := len(runes) -+ lastAlphanumeric := l -+ -+ for i = l - 1; i >= 0; i-- { -+ r = runes[i] -+ -+ if ('a' <= r && r <= 'y') || -+ ('A' <= r && r <= 'Y') || -+ ('0' <= r && r <= '8') { -+ runes[i]++ -+ carry = ' ' -+ lastAlphanumeric = i -+ break -+ } -+ -+ switch r { -+ case 'z': -+ runes[i] = 'a' -+ carry = 'a' -+ lastAlphanumeric = i -+ -+ case 'Z': -+ runes[i] = 'A' -+ carry = 'A' -+ lastAlphanumeric = i -+ -+ case '9': -+ runes[i] = '0' -+ carry = '0' -+ lastAlphanumeric = i -+ } -+ } -+ -+ // Needs to add one character for carry. -+ if i < 0 && carry != ' ' { -+ buf := &bytes.Buffer{} -+ buf.Grow(l + 4) // Reserve enough space for write. -+ -+ if lastAlphanumeric != 0 { -+ buf.WriteString(str[:lastAlphanumeric]) -+ } -+ -+ buf.WriteRune(carry) -+ -+ for _, r = range runes[lastAlphanumeric:] { -+ buf.WriteRune(r) -+ } -+ -+ return buf.String() -+ } -+ -+ // No alphanumeric character. Simply increase last rune's value. -+ if lastAlphanumeric == l { -+ runes[l-1]++ -+ } -+ -+ return string(runes) -+} -diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go -new file mode 100644 -index 0000000000000..f96e38703a3a6 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/count.go -@@ -0,0 +1,120 @@ -+// Copyright 2015 Huan Du. All rights reserved. -+// Licensed under the MIT license that can be found in the LICENSE file. -+ -+package xstrings -+ -+import ( -+ ""unicode"" -+ ""unicode/utf8"" -+) -+ -+// Len returns str's utf8 rune length. -+func Len(str string) int { -+ return utf8.RuneCountInString(str) -+} -+ -+// WordCount returns number of words in a string. -+// -+// Word is defined as a locale dependent string containing alphabetic characters, -+// which may also contain but not start with `'` and `-` characters. 
-+func WordCount(str string) int { -+ var r rune -+ var size, n int -+ -+ inWord := false -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ -+ switch { -+ case isAlphabet(r): -+ if !inWord { -+ inWord = true -+ n++ -+ } -+ -+ case inWord && (r == '\'' || r == '-'): -+ // Still in word. -+ -+ default: -+ inWord = false -+ } -+ -+ str = str[size:] -+ } -+ -+ return n -+} -+ -+const minCJKCharacter = '\u3400' -+ -+// Checks r is a letter but not CJK character. -+func isAlphabet(r rune) bool { -+ if !unicode.IsLetter(r) { -+ return false -+ } -+ -+ switch { -+ // Quick check for non-CJK character. -+ case r < minCJKCharacter: -+ return true -+ -+ // Common CJK characters. -+ case r >= '\u4E00' && r <= '\u9FCC': -+ return false -+ -+ // Rare CJK characters. -+ case r >= '\u3400' && r <= '\u4D85': -+ return false -+ -+ // Rare and historic CJK characters. -+ case r >= '\U00020000' && r <= '\U0002B81D': -+ return false -+ } -+ -+ return true -+} -+ -+// Width returns string width in monotype font. -+// Multi-byte characters are usually twice the width of single byte characters. -+// -+// Algorithm comes from `mb_strwidth` in PHP. -+// http://php.net/manual/en/function.mb-strwidth.php -+func Width(str string) int { -+ var r rune -+ var size, n int -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ n += RuneWidth(r) -+ str = str[size:] -+ } -+ -+ return n -+} -+ -+// RuneWidth returns character width in monotype font. -+// Multi-byte characters are usually twice the width of single byte characters. -+// -+// Algorithm comes from `mb_strwidth` in PHP. -+// http://php.net/manual/en/function.mb-strwidth.php -+func RuneWidth(r rune) int { -+ switch { -+ case r == utf8.RuneError || r < '\x20': -+ return 0 -+ -+ case '\x20' <= r && r < '\u2000': -+ return 1 -+ -+ case '\u2000' <= r && r < '\uFF61': -+ return 2 -+ -+ case '\uFF61' <= r && r < '\uFFA0': -+ return 1 -+ -+ case '\uFFA0' <= r: -+ return 2 -+ } -+ -+ return 0 -+} -diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go -new file mode 100644 -index 0000000000000..1a6ef069f6133 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/doc.go -@@ -0,0 +1,8 @@ -+// Copyright 2015 Huan Du. All rights reserved. -+// Licensed under the MIT license that can be found in the LICENSE file. -+ -+// Package xstrings is to provide string algorithms which are useful but not included in `strings` package. -+// See project home page for details. https://github.com/huandu/xstrings -+// -+// Package xstrings assumes all strings are encoded in utf8. -+package xstrings -diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go -new file mode 100644 -index 0000000000000..2d02df1c042f0 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/format.go -@@ -0,0 +1,170 @@ -+// Copyright 2015 Huan Du. All rights reserved. -+// Licensed under the MIT license that can be found in the LICENSE file. -+ -+package xstrings -+ -+import ( -+ ""bytes"" -+ ""unicode/utf8"" -+) -+ -+// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on -+// current column and tabSize. -+// The column number is reset to zero after each newline ('\n') occurring in the str. -+// -+// ExpandTabs uses RuneWidth to decide rune's width. -+// For example, CJK characters will be treated as two characters. -+// -+// If tabSize <= 0, ExpandTabs panics with error. 
-+// -+// Samples: -+// ExpandTabs(""a\tbc\tdef\tghij\tk"", 4) => ""a bc def ghij k"" -+// ExpandTabs(""abcdefg\thij\nk\tl"", 4) => ""abcdefg hij\nk l"" -+// ExpandTabs(""z中\t文\tw"", 4) => ""z中 文 w"" -+func ExpandTabs(str string, tabSize int) string { -+ if tabSize <= 0 { -+ panic(""tab size must be positive"") -+ } -+ -+ var r rune -+ var i, size, column, expand int -+ var output *bytes.Buffer -+ -+ orig := str -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ -+ if r == '\t' { -+ expand = tabSize - column%tabSize -+ -+ if output == nil { -+ output = allocBuffer(orig, str) -+ } -+ -+ for i = 0; i < expand; i++ { -+ output.WriteByte(byte(' ')) -+ } -+ -+ column += expand -+ } else { -+ if r == '\n' { -+ column = 0 -+ } else { -+ column += RuneWidth(r) -+ } -+ -+ if output != nil { -+ output.WriteRune(r) -+ } -+ } -+ -+ str = str[size:] -+ } -+ -+ if output == nil { -+ return orig -+ } -+ -+ return output.String() -+} -+ -+// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. -+// If str's rune length is larger than length, str itself will be returned. -+// -+// If pad is an empty string, str will be returned. -+// -+// Samples: -+// LeftJustify(""hello"", 4, "" "") => ""hello"" -+// LeftJustify(""hello"", 10, "" "") => ""hello "" -+// LeftJustify(""hello"", 10, ""123"") => ""hello12312"" -+func LeftJustify(str string, length int, pad string) string { -+ l := Len(str) -+ -+ if l >= length || pad == """" { -+ return str -+ } -+ -+ remains := length - l -+ padLen := Len(pad) -+ -+ output := &bytes.Buffer{} -+ output.Grow(len(str) + (remains/padLen+1)*len(pad)) -+ output.WriteString(str) -+ writePadString(output, pad, padLen, remains) -+ return output.String() -+} -+ -+// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. -+// If str's rune length is larger than length, str itself will be returned. -+// -+// If pad is an empty string, str will be returned. -+// -+// Samples: -+// RightJustify(""hello"", 4, "" "") => ""hello"" -+// RightJustify(""hello"", 10, "" "") => "" hello"" -+// RightJustify(""hello"", 10, ""123"") => ""12312hello"" -+func RightJustify(str string, length int, pad string) string { -+ l := Len(str) -+ -+ if l >= length || pad == """" { -+ return str -+ } -+ -+ remains := length - l -+ padLen := Len(pad) -+ -+ output := &bytes.Buffer{} -+ output.Grow(len(str) + (remains/padLen+1)*len(pad)) -+ writePadString(output, pad, padLen, remains) -+ output.WriteString(str) -+ return output.String() -+} -+ -+// Center returns a string with pad string at both side if str's rune length is smaller than length. -+// If str's rune length is larger than length, str itself will be returned. -+// -+// If pad is an empty string, str will be returned. 
-+// -+// Samples: -+// Center(""hello"", 4, "" "") => ""hello"" -+// Center(""hello"", 10, "" "") => "" hello "" -+// Center(""hello"", 10, ""123"") => ""12hello123"" -+func Center(str string, length int, pad string) string { -+ l := Len(str) -+ -+ if l >= length || pad == """" { -+ return str -+ } -+ -+ remains := length - l -+ padLen := Len(pad) -+ -+ output := &bytes.Buffer{} -+ output.Grow(len(str) + (remains/padLen+1)*len(pad)) -+ writePadString(output, pad, padLen, remains/2) -+ output.WriteString(str) -+ writePadString(output, pad, padLen, (remains+1)/2) -+ return output.String() -+} -+ -+func writePadString(output *bytes.Buffer, pad string, padLen, remains int) { -+ var r rune -+ var size int -+ -+ repeats := remains / padLen -+ -+ for i := 0; i < repeats; i++ { -+ output.WriteString(pad) -+ } -+ -+ remains = remains % padLen -+ -+ if remains != 0 { -+ for i := 0; i < remains; i++ { -+ r, size = utf8.DecodeRuneInString(pad) -+ output.WriteRune(r) -+ pad = pad[size:] -+ } -+ } -+} -diff --git a/vendor/github.com/huandu/xstrings/go.mod b/vendor/github.com/huandu/xstrings/go.mod -new file mode 100644 -index 0000000000000..3982c204ca47a ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/go.mod -@@ -0,0 +1,3 @@ -+module github.com/huandu/xstrings -+ -+go 1.12 -diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go -new file mode 100644 -index 0000000000000..0eefb43ed71d2 ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/manipulate.go -@@ -0,0 +1,217 @@ -+// Copyright 2015 Huan Du. All rights reserved. -+// Licensed under the MIT license that can be found in the LICENSE file. -+ -+package xstrings -+ -+import ( -+ ""bytes"" -+ ""strings"" -+ ""unicode/utf8"" -+) -+ -+// Reverse a utf8 encoded string. -+func Reverse(str string) string { -+ var size int -+ -+ tail := len(str) -+ buf := make([]byte, tail) -+ s := buf -+ -+ for len(str) > 0 { -+ _, size = utf8.DecodeRuneInString(str) -+ tail -= size -+ s = append(s[:tail], []byte(str[:size])...) -+ str = str[size:] -+ } -+ -+ return string(buf) -+} -+ -+// Slice a string by rune. -+// -+// Start must satisfy 0 <= start <= rune length. -+// -+// End can be positive, zero or negative. -+// If end >= 0, start and end must satisfy start <= end <= rune length. -+// If end < 0, it means slice to the end of string. -+// -+// Otherwise, Slice will panic as out of range. -+func Slice(str string, start, end int) string { -+ var size, startPos, endPos int -+ -+ origin := str -+ -+ if start < 0 || end > len(str) || (end >= 0 && start > end) { -+ panic(""out of range"") -+ } -+ -+ if end >= 0 { -+ end -= start -+ } -+ -+ for start > 0 && len(str) > 0 { -+ _, size = utf8.DecodeRuneInString(str) -+ start-- -+ startPos += size -+ str = str[size:] -+ } -+ -+ if end < 0 { -+ return origin[startPos:] -+ } -+ -+ endPos = startPos -+ -+ for end > 0 && len(str) > 0 { -+ _, size = utf8.DecodeRuneInString(str) -+ end-- -+ endPos += size -+ str = str[size:] -+ } -+ -+ if len(str) == 0 && (start > 0 || end > 0) { -+ panic(""out of range"") -+ } -+ -+ return origin[startPos:endPos] -+} -+ -+// Partition splits a string by sep into three parts. -+// The return value is a slice of strings with head, match and tail. 
-+// -+// If str contains sep, for example ""hello"" and ""l"", Partition returns -+// ""he"", ""l"", ""lo"" -+// -+// If str doesn't contain sep, for example ""hello"" and ""x"", Partition returns -+// ""hello"", """", """" -+func Partition(str, sep string) (head, match, tail string) { -+ index := strings.Index(str, sep) -+ -+ if index == -1 { -+ head = str -+ return -+ } -+ -+ head = str[:index] -+ match = str[index : index+len(sep)] -+ tail = str[index+len(sep):] -+ return -+} -+ -+// LastPartition splits a string by last instance of sep into three parts. -+// The return value is a slice of strings with head, match and tail. -+// -+// If str contains sep, for example ""hello"" and ""l"", LastPartition returns -+// ""hel"", ""l"", ""o"" -+// -+// If str doesn't contain sep, for example ""hello"" and ""x"", LastPartition returns -+// """", """", ""hello"" -+func LastPartition(str, sep string) (head, match, tail string) { -+ index := strings.LastIndex(str, sep) -+ -+ if index == -1 { -+ tail = str -+ return -+ } -+ -+ head = str[:index] -+ match = str[index : index+len(sep)] -+ tail = str[index+len(sep):] -+ return -+} -+ -+// Insert src into dst at given rune index. -+// Index is counted by runes instead of bytes. -+// -+// If index is out of range of dst, panic with out of range. -+func Insert(dst, src string, index int) string { -+ return Slice(dst, 0, index) + src + Slice(dst, index, -1) -+} -+ -+// Scrub scrubs invalid utf8 bytes with repl string. -+// Adjacent invalid bytes are replaced only once. -+func Scrub(str, repl string) string { -+ var buf *bytes.Buffer -+ var r rune -+ var size, pos int -+ var hasError bool -+ -+ origin := str -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ -+ if r == utf8.RuneError { -+ if !hasError { -+ if buf == nil { -+ buf = &bytes.Buffer{} -+ } -+ -+ buf.WriteString(origin[:pos]) -+ hasError = true -+ } -+ } else if hasError { -+ hasError = false -+ buf.WriteString(repl) -+ -+ origin = origin[pos:] -+ pos = 0 -+ } -+ -+ pos += size -+ str = str[size:] -+ } -+ -+ if buf != nil { -+ buf.WriteString(origin) -+ return buf.String() -+ } -+ -+ // No invalid byte. -+ return origin -+} -+ -+// WordSplit splits a string into words. Returns a slice of words. -+// If there is no word in a string, return nil. -+// -+// Word is defined as a locale dependent string containing alphabetic characters, -+// which may also contain but not start with `'` and `-` characters. -+func WordSplit(str string) []string { -+ var word string -+ var words []string -+ var r rune -+ var size, pos int -+ -+ inWord := false -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ -+ switch { -+ case isAlphabet(r): -+ if !inWord { -+ inWord = true -+ word = str -+ pos = 0 -+ } -+ -+ case inWord && (r == '\'' || r == '-'): -+ // Still in word. -+ -+ default: -+ if inWord { -+ inWord = false -+ words = append(words, word[:pos]) -+ } -+ } -+ -+ pos += size -+ str = str[size:] -+ } -+ -+ if inWord { -+ words = append(words, word[:pos]) -+ } -+ -+ return words -+} -diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go -new file mode 100644 -index 0000000000000..66e23f86d030c ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/translate.go -@@ -0,0 +1,547 @@ -+// Copyright 2015 Huan Du. All rights reserved. -+// Licensed under the MIT license that can be found in the LICENSE file. 
-+ -+package xstrings -+ -+import ( -+ ""bytes"" -+ ""unicode"" -+ ""unicode/utf8"" -+) -+ -+type runeRangeMap struct { -+ FromLo rune // Lower bound of range map. -+ FromHi rune // An inclusive higher bound of range map. -+ ToLo rune -+ ToHi rune -+} -+ -+type runeDict struct { -+ Dict [unicode.MaxASCII + 1]rune -+} -+ -+type runeMap map[rune]rune -+ -+// Translator can translate string with pre-compiled from and to patterns. -+// If a from/to pattern pair needs to be used more than once, it's recommended -+// to create a Translator and reuse it. -+type Translator struct { -+ quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. -+ runeMap runeMap // Rune map for translation. -+ ranges []*runeRangeMap // Ranges of runes. -+ mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. -+ reverted bool // If to pattern is empty, all matched characters will be deleted. -+ hasPattern bool -+} -+ -+// NewTranslator creates new Translator through a from/to pattern pair. -+func NewTranslator(from, to string) *Translator { -+ tr := &Translator{} -+ -+ if from == """" { -+ return tr -+ } -+ -+ reverted := from[0] == '^' -+ deletion := len(to) == 0 -+ -+ if reverted { -+ from = from[1:] -+ } -+ -+ var fromStart, fromEnd, fromRangeStep rune -+ var toStart, toEnd, toRangeStep rune -+ var fromRangeSize, toRangeSize rune -+ var singleRunes []rune -+ -+ // Update the to rune range. -+ updateRange := func() { -+ // No more rune to read in the to rune pattern. -+ if toEnd == utf8.RuneError { -+ return -+ } -+ -+ if toRangeStep == 0 { -+ to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) -+ return -+ } -+ -+ // Current range is not empty. Consume 1 rune from start. -+ if toStart != toEnd { -+ toStart += toRangeStep -+ return -+ } -+ -+ // No more rune. Repeat the last rune. -+ if to == """" { -+ toEnd = utf8.RuneError -+ return -+ } -+ -+ // Both start and end are used. Read two more runes from the to pattern. -+ to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) -+ } -+ -+ if deletion { -+ toStart = utf8.RuneError -+ toEnd = utf8.RuneError -+ } else { -+ // If from pattern is reverted, only the last rune in the to pattern will be used. -+ if reverted { -+ var size int -+ -+ for len(to) > 0 { -+ toStart, size = utf8.DecodeRuneInString(to) -+ to = to[size:] -+ } -+ -+ toEnd = utf8.RuneError -+ } else { -+ to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) -+ } -+ } -+ -+ fromEnd = utf8.RuneError -+ -+ for len(from) > 0 { -+ from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) -+ -+ // fromStart is a single character. Just map it with a rune in the to pattern. -+ if fromRangeStep == 0 { -+ singleRunes = tr.addRune(fromStart, toStart, singleRunes) -+ updateRange() -+ continue -+ } -+ -+ for toEnd != utf8.RuneError && fromStart != fromEnd { -+ // If mapped rune is a single character instead of a range, simply shift first -+ // rune in the range. -+ if toRangeStep == 0 { -+ singleRunes = tr.addRune(fromStart, toStart, singleRunes) -+ updateRange() -+ fromStart += fromRangeStep -+ continue -+ } -+ -+ fromRangeSize = (fromEnd - fromStart) * fromRangeStep -+ toRangeSize = (toEnd - toStart) * toRangeStep -+ -+ // Not enough runes in the to pattern. Need to read more. 
-+ if fromRangeSize > toRangeSize { -+ fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) -+ fromStart += fromRangeStep -+ updateRange() -+ -+ // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered -+ // as a single rune. -+ if fromStart == fromEnd { -+ singleRunes = tr.addRune(fromStart, toStart, singleRunes) -+ updateRange() -+ } -+ -+ continue -+ } -+ -+ fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) -+ updateRange() -+ break -+ } -+ -+ if fromStart == fromEnd { -+ fromEnd = utf8.RuneError -+ continue -+ } -+ -+ fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) -+ fromEnd = utf8.RuneError -+ } -+ -+ if fromEnd != utf8.RuneError { -+ singleRunes = tr.addRune(fromEnd, toStart, singleRunes) -+ } -+ -+ tr.reverted = reverted -+ tr.mappedRune = -1 -+ tr.hasPattern = true -+ -+ // Translate RuneError only if in deletion or reverted mode. -+ if deletion || reverted { -+ tr.mappedRune = toStart -+ } -+ -+ return tr -+} -+ -+func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { -+ if from <= unicode.MaxASCII { -+ if tr.quickDict == nil { -+ tr.quickDict = &runeDict{} -+ } -+ -+ tr.quickDict.Dict[from] = to -+ } else { -+ if tr.runeMap == nil { -+ tr.runeMap = make(runeMap) -+ } -+ -+ tr.runeMap[from] = to -+ } -+ -+ singleRunes = append(singleRunes, from) -+ return singleRunes -+} -+ -+func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { -+ var r rune -+ var rrm *runeRangeMap -+ -+ if fromLo < fromHi { -+ rrm = &runeRangeMap{ -+ FromLo: fromLo, -+ FromHi: fromHi, -+ ToLo: toLo, -+ ToHi: toHi, -+ } -+ } else { -+ rrm = &runeRangeMap{ -+ FromLo: fromHi, -+ FromHi: fromLo, -+ ToLo: toHi, -+ ToHi: toLo, -+ } -+ } -+ -+ // If there is any single rune conflicts with this rune range, clear single rune record. -+ for _, r = range singleRunes { -+ if rrm.FromLo <= r && r <= rrm.FromHi { -+ if r <= unicode.MaxASCII { -+ tr.quickDict.Dict[r] = 0 -+ } else { -+ delete(tr.runeMap, r) -+ } -+ } -+ } -+ -+ tr.ranges = append(tr.ranges, rrm) -+ return fromHi, toHi -+} -+ -+func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { -+ var r rune -+ var size int -+ -+ remaining = str -+ escaping := false -+ isRange := false -+ -+ for len(remaining) > 0 { -+ r, size = utf8.DecodeRuneInString(remaining) -+ remaining = remaining[size:] -+ -+ // Parse special characters. -+ if !escaping { -+ if r == '\\' { -+ escaping = true -+ continue -+ } -+ -+ if r == '-' { -+ // Ignore slash at beginning of string. -+ if last == utf8.RuneError { -+ continue -+ } -+ -+ start = last -+ isRange = true -+ continue -+ } -+ } -+ -+ escaping = false -+ -+ if last != utf8.RuneError { -+ // This is a range which start and end are the same. -+ // Considier it as a normal character. -+ if isRange && last == r { -+ isRange = false -+ continue -+ } -+ -+ start = last -+ end = r -+ -+ if isRange { -+ if start < end { -+ rangeStep = 1 -+ } else { -+ rangeStep = -1 -+ } -+ } -+ -+ return -+ } -+ -+ last = r -+ } -+ -+ start = last -+ end = utf8.RuneError -+ return -+} -+ -+// Translate str with a from/to pattern pair. -+// -+// See comment in Translate function for usage and samples. 
-+func (tr *Translator) Translate(str string) string { -+ if !tr.hasPattern || str == """" { -+ return str -+ } -+ -+ var r rune -+ var size int -+ var needTr bool -+ -+ orig := str -+ -+ var output *bytes.Buffer -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ r, needTr = tr.TranslateRune(r) -+ -+ if needTr && output == nil { -+ output = allocBuffer(orig, str) -+ } -+ -+ if r != utf8.RuneError && output != nil { -+ output.WriteRune(r) -+ } -+ -+ str = str[size:] -+ } -+ -+ // No character is translated. -+ if output == nil { -+ return orig -+ } -+ -+ return output.String() -+} -+ -+// TranslateRune return translated rune and true if r matches the from pattern. -+// If r doesn't match the pattern, original r is returned and translated is false. -+func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { -+ switch { -+ case tr.quickDict != nil: -+ if r <= unicode.MaxASCII { -+ result = tr.quickDict.Dict[r] -+ -+ if result != 0 { -+ translated = true -+ -+ if tr.mappedRune >= 0 { -+ result = tr.mappedRune -+ } -+ -+ break -+ } -+ } -+ -+ fallthrough -+ -+ case tr.runeMap != nil: -+ var ok bool -+ -+ if result, ok = tr.runeMap[r]; ok { -+ translated = true -+ -+ if tr.mappedRune >= 0 { -+ result = tr.mappedRune -+ } -+ -+ break -+ } -+ -+ fallthrough -+ -+ default: -+ var rrm *runeRangeMap -+ ranges := tr.ranges -+ -+ for i := len(ranges) - 1; i >= 0; i-- { -+ rrm = ranges[i] -+ -+ if rrm.FromLo <= r && r <= rrm.FromHi { -+ translated = true -+ -+ if tr.mappedRune >= 0 { -+ result = tr.mappedRune -+ break -+ } -+ -+ if rrm.ToLo < rrm.ToHi { -+ result = rrm.ToLo + r - rrm.FromLo -+ } else if rrm.ToLo > rrm.ToHi { -+ // ToHi can be smaller than ToLo if range is from higher to lower. -+ result = rrm.ToLo - r + rrm.FromLo -+ } else { -+ result = rrm.ToLo -+ } -+ -+ break -+ } -+ } -+ } -+ -+ if tr.reverted { -+ if !translated { -+ result = tr.mappedRune -+ } -+ -+ translated = !translated -+ } -+ -+ if !translated { -+ result = r -+ } -+ -+ return -+} -+ -+// HasPattern returns true if Translator has one pattern at least. -+func (tr *Translator) HasPattern() bool { -+ return tr.hasPattern -+} -+ -+// Translate str with the characters defined in from replaced by characters defined in to. -+// -+// From and to are patterns representing a set of characters. Pattern is defined as following. -+// -+// * Special characters -+// * '-' means a range of runes, e.g. -+// * ""a-z"" means all characters from 'a' to 'z' inclusive; -+// * ""z-a"" means all characters from 'z' to 'a' inclusive. -+// * '^' as first character means a set of all runes excepted listed, e.g. -+// * ""^a-z"" means all characters except 'a' to 'z' inclusive. -+// * '\' escapes special characters. -+// * Normal character represents itself, e.g. ""abc"" is a set including 'a', 'b' and 'c'. -+// -+// Translate will try to find a 1:1 mapping from from to to. -+// If to is smaller than from, last rune in to will be used to map ""out of range"" characters in from. -+// -+// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. -+// -+// If the to pattern is an empty string, Translate works exactly the same as Delete. 
-+// -+// Samples: -+// Translate(""hello"", ""aeiou"", ""12345"") => ""h2ll4"" -+// Translate(""hello"", ""a-z"", ""A-Z"") => ""HELLO"" -+// Translate(""hello"", ""z-a"", ""a-z"") => ""svool"" -+// Translate(""hello"", ""aeiou"", ""*"") => ""h*ll*"" -+// Translate(""hello"", ""^l"", ""*"") => ""**ll*"" -+// Translate(""hello ^ world"", `\^lo`, ""*"") => ""he*** * w*r*d"" -+func Translate(str, from, to string) string { -+ tr := NewTranslator(from, to) -+ return tr.Translate(str) -+} -+ -+// Delete runes in str matching the pattern. -+// Pattern is defined in Translate function. -+// -+// Samples: -+// Delete(""hello"", ""aeiou"") => ""hll"" -+// Delete(""hello"", ""a-k"") => ""llo"" -+// Delete(""hello"", ""^a-k"") => ""he"" -+func Delete(str, pattern string) string { -+ tr := NewTranslator(pattern, """") -+ return tr.Translate(str) -+} -+ -+// Count how many runes in str match the pattern. -+// Pattern is defined in Translate function. -+// -+// Samples: -+// Count(""hello"", ""aeiou"") => 3 -+// Count(""hello"", ""a-k"") => 3 -+// Count(""hello"", ""^a-k"") => 2 -+func Count(str, pattern string) int { -+ if pattern == """" || str == """" { -+ return 0 -+ } -+ -+ var r rune -+ var size int -+ var matched bool -+ -+ tr := NewTranslator(pattern, """") -+ cnt := 0 -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ str = str[size:] -+ -+ if _, matched = tr.TranslateRune(r); matched { -+ cnt++ -+ } -+ } -+ -+ return cnt -+} -+ -+// Squeeze deletes adjacent repeated runes in str. -+// If pattern is not empty, only runes matching the pattern will be squeezed. -+// -+// Samples: -+// Squeeze(""hello"", """") => ""helo"" -+// Squeeze(""hello"", ""m-z"") => ""hello"" -+// Squeeze(""hello world"", "" "") => ""hello world"" -+func Squeeze(str, pattern string) string { -+ var last, r rune -+ var size int -+ var skipSqueeze, matched bool -+ var tr *Translator -+ var output *bytes.Buffer -+ -+ orig := str -+ last = -1 -+ -+ if len(pattern) > 0 { -+ tr = NewTranslator(pattern, """") -+ } -+ -+ for len(str) > 0 { -+ r, size = utf8.DecodeRuneInString(str) -+ -+ // Need to squeeze the str. -+ if last == r && !skipSqueeze { -+ if tr != nil { -+ if _, matched = tr.TranslateRune(r); !matched { -+ skipSqueeze = true -+ } -+ } -+ -+ if output == nil { -+ output = allocBuffer(orig, str) -+ } -+ -+ if skipSqueeze { -+ output.WriteRune(r) -+ } -+ } else { -+ if output != nil { -+ output.WriteRune(r) -+ } -+ -+ last = r -+ skipSqueeze = false -+ } -+ -+ str = str[size:] -+ } -+ -+ if output == nil { -+ return orig -+ } -+ -+ return output.String() -+} -diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md -index 02fc81e0626e3..876abb500a4e4 100644 ---- a/vendor/github.com/imdario/mergo/README.md -+++ b/vendor/github.com/imdario/mergo/README.md -@@ -1,44 +1,54 @@ - # Mergo - --A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -- --Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. -- --## Status -- --It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
- - [![GoDoc][3]][4] --[![GoCard][5]][6] -+[![GitHub release][5]][6] -+[![GoCard][7]][8] - [![Build Status][1]][2] --[![Coverage Status][7]][8] --[![Sourcegraph][9]][10] --[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) -+[![Coverage Status][9]][10] -+[![Sourcegraph][11]][12] -+[![FOSSA Status][13]][14] -+ -+[![GoCenter Kudos][15]][16] - - [1]: https://travis-ci.org/imdario/mergo.png - [2]: https://travis-ci.org/imdario/mergo - [3]: https://godoc.org/github.com/imdario/mergo?status.svg - [4]: https://godoc.org/github.com/imdario/mergo --[5]: https://goreportcard.com/badge/imdario/mergo --[6]: https://goreportcard.com/report/github.com/imdario/mergo --[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master --[8]: https://coveralls.io/github/imdario/mergo?branch=master --[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg --[10]: https://sourcegraph.com/github.com/imdario/mergo?badge -+[5]: https://img.shields.io/github/release/imdario/mergo.svg -+[6]: https://github.com/imdario/mergo/releases -+[7]: https://goreportcard.com/badge/imdario/mergo -+[8]: https://goreportcard.com/report/github.com/imdario/mergo -+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -+[10]: https://coveralls.io/github/imdario/mergo?branch=master -+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -+[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -+[16]: https://search.gocenter.io/github.com/imdario/mergo - --### Latest release -+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - --[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). -+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -+ -+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. -+ -+## Status -+ -+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - - ### Important note - --Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. -+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. - --If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. 
I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). -+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. -+ -+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - - ### Donations - --If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: -+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - - Buy Me a Coffee at ko-fi.com - [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -@@ -87,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month - - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - - [jnuthong/item_search](https://github.com/jnuthong/item_search) - - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -+- [janoszen/containerssh](https://github.com/janoszen/containerssh) - --## Installation -+## Install - - go get github.com/imdario/mergo - -@@ -99,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month - - ## Usage - --You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). -+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - ```go - if err := mergo.Merge(&dst, src); err != nil { -@@ -125,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { - - Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - --More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). 
-- --### Nice example -+Here is a nice example: - - ```go - package main -@@ -175,10 +184,10 @@ import ( - ""time"" - ) - --type timeTransfomer struct { -+type timeTransformer struct { - } - --func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { -+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { -@@ -202,7 +211,7 @@ type Snapshot struct { - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} -- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) -+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go -index 6e9aa7baf3540..fcd985f995dc2 100644 ---- a/vendor/github.com/imdario/mergo/doc.go -+++ b/vendor/github.com/imdario/mergo/doc.go -@@ -4,41 +4,140 @@ - // license that can be found in the LICENSE file. - - /* --Package mergo merges same-type structs and maps by setting default values in zero-value fields. -+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - --Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -+ -+Status -+ -+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. -+ -+Important note -+ -+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. -+ -+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. -+ -+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). -+ -+Install -+ -+Do your usual installation procedure: -+ -+ go get github.com/imdario/mergo -+ -+ // use in your .go code -+ import ( -+ ""github.com/imdario/mergo"" -+ ) - - Usage - --From my own work-in-progress project: -+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). -+ -+ if err := mergo.Merge(&dst, src); err != nil { -+ // ... -+ } -+ -+Also, you can merge overwriting values using the transformer WithOverride. -+ -+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { -+ // ... 
-+ } -+ -+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. -+ -+ if err := mergo.Map(&dst, srcMap); err != nil { -+ // ... -+ } -+ -+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. -+ -+Here is a nice example: -+ -+ package main -+ -+ import ( -+ ""fmt"" -+ ""github.com/imdario/mergo"" -+ ) - -- type networkConfig struct { -- Protocol string -- Address string -- ServerType string `json: ""server_type""` -- Port uint16 -+ type Foo struct { -+ A string -+ B int64 - } - -- type FssnConfig struct { -- Network networkConfig -+ func main() { -+ src := Foo{ -+ A: ""one"", -+ B: 2, -+ } -+ dest := Foo{ -+ A: ""two"", -+ } -+ mergo.Merge(&dest, src) -+ fmt.Println(dest) -+ // Will print -+ // {two 2} - } - -- var fssnDefault = FssnConfig { -- networkConfig { -- ""tcp"", -- ""127.0.0.1"", -- ""http"", -- 31560, -- }, -+Transformers -+ -+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? -+ -+ package main -+ -+ import ( -+ ""fmt"" -+ ""github.com/imdario/mergo"" -+ ""reflect"" -+ ""time"" -+ ) -+ -+ type timeTransformer struct { - } - -- // Inside a function [...] -+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { -+ if typ == reflect.TypeOf(time.Time{}) { -+ return func(dst, src reflect.Value) error { -+ if dst.CanSet() { -+ isZero := dst.MethodByName(""IsZero"") -+ result := isZero.Call([]reflect.Value{}) -+ if result[0].Bool() { -+ dst.Set(src) -+ } -+ } -+ return nil -+ } -+ } -+ return nil -+ } -+ -+ type Snapshot struct { -+ Time time.Time -+ // ... -+ } - -- if err := mergo.Merge(&config, fssnDefault); err != nil { -- log.Fatal(err) -+ func main() { -+ src := Snapshot{time.Now()} -+ dest := Snapshot{} -+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) -+ fmt.Println(dest) -+ // Will print -+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -- // More code [...] -+Contact me -+ -+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario -+ -+About -+ -+Written by Dario Castañé: https://da.rio.hn -+ -+License -+ -+BSD 3-Clause license, as Go language. 
- - */ - package mergo -diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod -new file mode 100644 -index 0000000000000..3d689d93eb3b0 ---- /dev/null -+++ b/vendor/github.com/imdario/mergo/go.mod -@@ -0,0 +1,5 @@ -+module github.com/imdario/mergo -+ -+go 1.13 -+ -+require gopkg.in/yaml.v2 v2.3.0 -diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum -new file mode 100644 -index 0000000000000..168980da5f749 ---- /dev/null -+++ b/vendor/github.com/imdario/mergo/go.sum -@@ -0,0 +1,4 @@ -+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go -index d83258b4dda22..a13a7ee46c777 100644 ---- a/vendor/github.com/imdario/mergo/map.go -+++ b/vendor/github.com/imdario/mergo/map.go -@@ -99,11 +99,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf - continue - } - if srcKind == dstKind { -- if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { -+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { -- if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { -+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { -@@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - } - - func _map(dst, src interface{}, opts ...func(*Config)) error { -+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { -+ return ErrNonPointerAgument -+ } - var ( - vDst, vSrc reflect.Value - err error -@@ -157,8 +160,7 @@ func _map(dst, src interface{}, opts ...func(*Config)) error { - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. 
- if vSrc.Kind() == vDst.Kind() { -- _, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -- return err -+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: -diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go -index 3332c9c2a7ac3..afa84a1e2985e 100644 ---- a/vendor/github.com/imdario/mergo/merge.go -+++ b/vendor/github.com/imdario/mergo/merge.go -@@ -11,26 +11,26 @@ package mergo - import ( - ""fmt"" - ""reflect"" -- ""unsafe"" - ) - --func hasExportedField(dst reflect.Value) (exported bool) { -+func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) -- if isExportedComponent(&field) { -- return true -+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { -+ exported = exported || hasMergeableFields(dst.Field(i)) -+ } else if isExportedComponent(&field) { -+ exported = exported || len(field.PkgPath) == 0 - } - } - return - } - - func isExportedComponent(field *reflect.StructField) bool { -- name := field.Name - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } -- c := name[0] -+ c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } -@@ -44,6 +44,8 @@ type Config struct { - Transformers Transformers - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool -+ sliceDeepCopy bool -+ debug bool - } - - type Transformers interface { -@@ -53,17 +55,16 @@ type Transformers interface { - // Traverses recursively both values, assigning src's fields values to dst. - // The map argument tracks comparisons that have already been seen, which allows - // short circuiting on recursive types. --func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) { -- dst = dstIn -+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue -+ sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } -- - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr -@@ -71,7 +72,7 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { -- return dst, nil -+ return nil - } - } - // Remember, remember... 
-@@ -85,126 +86,154 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, - } - } - -- if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() { -- err = fmt.Errorf(""cannot append two different types (%s, %s)"", src.Kind(), dst.Kind()) -- return -- } -- - switch dst.Kind() { - case reflect.Struct: -- if hasExportedField(dst) { -- dstCp := reflect.New(dst.Type()).Elem() -+ if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { -- dstField := dst.Field(i) -- structField := dst.Type().Field(i) -- // copy un-exported struct fields -- if !isExportedComponent(&structField) { -- rf := dstCp.Field(i) -- rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec -- dstRF := dst.Field(i) -- if !dst.Field(i).CanAddr() { -- continue -- } -- -- dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec -- rf.Set(dstRF) -- continue -- } -- dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config) -- if err != nil { -+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } -- dstCp.Field(i).Set(dstField) - } -- -- if dst.CanSet() { -- dst.Set(dstCp) -- } else { -- dst = dstCp -- } -- return - } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { -- dst = src -+ dst.Set(src) - } - } -- - case reflect.Map: - if dst.IsNil() && !src.IsNil() { -- if dst.CanSet() { -- dst.Set(reflect.MakeMap(dst.Type())) -- } else { -- dst = src -- return -+ dst.Set(reflect.MakeMap(dst.Type())) -+ } -+ -+ if src.Kind() != reflect.Map { -+ if overwrite { -+ dst.Set(src) - } -+ return - } -+ - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) -- dstElement := dst.MapIndex(key) - if !srcElement.IsValid() { - continue - } -- if dst.MapIndex(key).IsValid() { -- k := dstElement.Interface() -- dstElement = reflect.ValueOf(k) -- } -- if isReflectNil(srcElement) { -- if overwrite || isReflectNil(dstElement) { -- dst.SetMapIndex(key, srcElement) -+ dstElement := dst.MapIndex(key) -+ switch srcElement.Kind() { -+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: -+ if srcElement.IsNil() { -+ if overwrite { -+ dst.SetMapIndex(key, srcElement) -+ } -+ continue -+ } -+ fallthrough -+ default: -+ if !srcElement.CanInterface() { -+ continue -+ } -+ switch reflect.TypeOf(srcElement.Interface()).Kind() { -+ case reflect.Struct: -+ fallthrough -+ case reflect.Ptr: -+ fallthrough -+ case reflect.Map: -+ srcMapElm := srcElement -+ dstMapElm := dstElement -+ if srcMapElm.CanInterface() { -+ srcMapElm = reflect.ValueOf(srcMapElm.Interface()) -+ if dstMapElm.IsValid() { -+ dstMapElm = reflect.ValueOf(dstMapElm.Interface()) -+ } -+ } -+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { -+ return -+ } -+ case reflect.Slice: -+ srcSlice := reflect.ValueOf(srcElement.Interface()) -+ -+ var dstSlice reflect.Value -+ if !dstElement.IsValid() || dstElement.IsNil() { -+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) -+ } else { -+ dstSlice = reflect.ValueOf(dstElement.Interface()) -+ } -+ -+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { -+ if typeCheck && srcSlice.Type() != dstSlice.Type() { -+ return fmt.Errorf(""cannot override two slices with different type (%s, %s)"", srcSlice.Type(), dstSlice.Type()) -+ } -+ dstSlice = 
srcSlice -+ } else if config.AppendSlice { -+ if srcSlice.Type() != dstSlice.Type() { -+ return fmt.Errorf(""cannot append two slices with different type (%s, %s)"", srcSlice.Type(), dstSlice.Type()) -+ } -+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice) -+ } else if sliceDeepCopy { -+ i := 0 -+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { -+ srcElement := srcSlice.Index(i) -+ dstElement := dstSlice.Index(i) -+ -+ if srcElement.CanInterface() { -+ srcElement = reflect.ValueOf(srcElement.Interface()) -+ } -+ if dstElement.CanInterface() { -+ dstElement = reflect.ValueOf(dstElement.Interface()) -+ } -+ -+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { -+ return -+ } -+ } -+ -+ } -+ dst.SetMapIndex(key, dstSlice) - } -- continue - } -- if !srcElement.CanInterface() { -+ if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue - } - -- if srcElement.CanInterface() { -- srcElement = reflect.ValueOf(srcElement.Interface()) -- if dstElement.IsValid() { -- dstElement = reflect.ValueOf(dstElement.Interface()) -+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { -+ if dst.IsNil() { -+ dst.Set(reflect.MakeMap(dst.Type())) - } -+ dst.SetMapIndex(key, srcElement) - } -- dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config) -- if err != nil { -- return -- } -- dst.SetMapIndex(key, dstElement) -- - } - case reflect.Slice: -- newSlice := dst -- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { -- if typeCheck && src.Type() != dst.Type() { -- return dst, fmt.Errorf(""cannot override two slices with different type (%s, %s)"", src.Type(), dst.Type()) -- } -- newSlice = src -- } else if config.AppendSlice { -- if typeCheck && src.Type() != dst.Type() { -- err = fmt.Errorf(""cannot append two slice with different type (%s, %s)"", src.Type(), dst.Type()) -- return -- } -- newSlice = reflect.AppendSlice(dst, src) -- } -- if dst.CanSet() { -- dst.Set(newSlice) -- } else { -- dst = newSlice -- } -- case reflect.Ptr, reflect.Interface: -- if isReflectNil(src) { -+ if !dst.CanSet() { - break - } -+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { -+ dst.Set(src) -+ } else if config.AppendSlice { -+ if src.Type() != dst.Type() { -+ return fmt.Errorf(""cannot append two slice with different type (%s, %s)"", src.Type(), dst.Type()) -+ } -+ dst.Set(reflect.AppendSlice(dst, src)) -+ } else if sliceDeepCopy { -+ for i := 0; i < src.Len() && i < dst.Len(); i++ { -+ srcElement := src.Index(i) -+ dstElement := dst.Index(i) -+ if srcElement.CanInterface() { -+ srcElement = reflect.ValueOf(srcElement.Interface()) -+ } -+ if dstElement.CanInterface() { -+ dstElement = reflect.ValueOf(dstElement.Interface()) -+ } - -- if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { -- if dst.IsNil() || overwrite { -- if overwrite || isEmptyValue(dst) { -- if dst.CanSet() { -- dst.Set(src) -- } else { -- dst = src -- } -+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { -+ return - } - } -+ } -+ case reflect.Ptr: -+ fallthrough -+ case reflect.Interface: -+ if isReflectNil(src) { -+ if 
overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { -+ dst.Set(src) -+ } - break - } - -@@ -214,33 +243,35 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { -- if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { -+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } -- dst = dst.Addr() - } else if dst.Elem().Type() == src.Type() { -- if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { -+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { -- return dst, ErrDifferentArgumentsTypes -+ return ErrDifferentArgumentsTypes - } - break - } -+ - if dst.IsNil() || overwrite { -- if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) { -- if dst.CanSet() { -- dst.Set(src) -- } else { -- dst = src -- } -+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) { -+ dst.Set(src) - } -- } else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { -- return -+ break -+ } -+ -+ if dst.Elem().Kind() == src.Elem().Kind() { -+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { -+ return -+ } -+ break - } - default: -- overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) -- if overwriteFull { -+ mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) -+ if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { -@@ -281,6 +312,7 @@ func WithOverride(config *Config) { - - // WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. - func WithOverwriteWithEmptyValue(config *Config) { -+ config.Overwrite = true - config.overwriteWithEmptyValue = true - } - -@@ -299,7 +331,16 @@ func WithTypeCheck(config *Config) { - config.TypeCheck = true - } - -+// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. 
-+func WithSliceDeepCopy(config *Config) { -+ config.sliceDeepCopy = true -+ config.Overwrite = true -+} -+ - func merge(dst, src interface{}, opts ...func(*Config)) error { -+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { -+ return ErrNonPointerAgument -+ } - var ( - vDst, vSrc reflect.Value - err error -@@ -314,14 +355,10 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } -- if !vDst.CanSet() { -- return fmt.Errorf(""cannot set dst, needs reference"") -- } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } -- _, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -- return err -+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - - // IsReflectNil is the reflect value provided nil -diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go -index a82fea2fdccc3..3cc926c7f6245 100644 ---- a/vendor/github.com/imdario/mergo/mergo.go -+++ b/vendor/github.com/imdario/mergo/mergo.go -@@ -20,6 +20,7 @@ var ( - ErrNotSupported = errors.New(""only structs and maps are supported"") - ErrExpectedMapAsDestination = errors.New(""dst was expected to be a map"") - ErrExpectedStructAsDestination = errors.New(""dst was expected to be a struct"") -+ ErrNonPointerAgument = errors.New(""dst must be a pointer"") - ) - - // During deepMerge, must keep track of checks that are -@@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - } - return - } -- --// Traverses recursively both values, assigning src's fields values to dst. --// The map argument tracks comparisons that have already been seen, which allows --// short circuiting on recursive types. --func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { -- if dst.CanAddr() { -- addr := dst.UnsafeAddr() -- h := 17 * addr -- seen := visited[h] -- typ := dst.Type() -- for p := seen; p != nil; p = p.next { -- if p.ptr == addr && p.typ == typ { -- return nil -- } -- } -- // Remember, remember... -- visited[h] = &visit{addr, typ, seen} -- } -- return // TODO refactor --} -diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml -new file mode 100644 -index 0000000000000..d7b9589ab11a7 ---- /dev/null -+++ b/vendor/github.com/mitchellh/copystructure/.travis.yml -@@ -0,0 +1,12 @@ -+language: go -+ -+go: -+ - 1.7 -+ - tip -+ -+script: -+ - go test -+ -+matrix: -+ allow_failures: -+ - go: tip -diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE -new file mode 100644 -index 0000000000000..229851590442a ---- /dev/null -+++ b/vendor/github.com/mitchellh/copystructure/LICENSE -@@ -0,0 +1,21 @@ -+The MIT License (MIT) -+ -+Copyright (c) 2014 Mitchell Hashimoto -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the ""Software""), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md -new file mode 100644 -index 0000000000000..bcb8c8d2cb97b ---- /dev/null -+++ b/vendor/github.com/mitchellh/copystructure/README.md -@@ -0,0 +1,21 @@ -+# copystructure -+ -+copystructure is a Go library for deep copying values in Go. -+ -+This allows you to copy Go values that may contain reference values -+such as maps, slices, or pointers, and copy their data as well instead -+of just their references. -+ -+## Installation -+ -+Standard `go get`: -+ -+``` -+$ go get github.com/mitchellh/copystructure -+``` -+ -+## Usage & Example -+ -+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). -+ -+The `Copy` function has examples associated with it there. -diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go -new file mode 100644 -index 0000000000000..db6a6aa1a1f4f ---- /dev/null -+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go -@@ -0,0 +1,15 @@ -+package copystructure -+ -+import ( -+ ""reflect"" -+ ""time"" -+) -+ -+func init() { -+ Copiers[reflect.TypeOf(time.Time{})] = timeCopier -+} -+ -+func timeCopier(v interface{}) (interface{}, error) { -+ // Just... copy it. -+ return v.(time.Time), nil -+} -diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go -new file mode 100644 -index 0000000000000..140435255e108 ---- /dev/null -+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go -@@ -0,0 +1,548 @@ -+package copystructure -+ -+import ( -+ ""errors"" -+ ""reflect"" -+ ""sync"" -+ -+ ""github.com/mitchellh/reflectwalk"" -+) -+ -+// Copy returns a deep copy of v. -+func Copy(v interface{}) (interface{}, error) { -+ return Config{}.Copy(v) -+} -+ -+// CopierFunc is a function that knows how to deep copy a specific type. -+// Register these globally with the Copiers variable. -+type CopierFunc func(interface{}) (interface{}, error) -+ -+// Copiers is a map of types that behave specially when they are copied. -+// If a type is found in this map while deep copying, this function -+// will be called to copy it instead of attempting to copy all fields. -+// -+// The key should be the type, obtained using: reflect.TypeOf(value with type). -+// -+// It is unsafe to write to this map after Copies have started. If you -+// are writing to this map while also copying, wrap all modifications to -+// this map as well as to Copy in a mutex. -+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) -+ -+// Must is a helper that wraps a call to a function returning -+// (interface{}, error) and panics if the error is non-nil. It is intended -+// for use in variable initializations and should only be used when a copy -+// error should be a crashing case. 
-+func Must(v interface{}, err error) interface{} { -+ if err != nil { -+ panic(""copy error: "" + err.Error()) -+ } -+ -+ return v -+} -+ -+var errPointerRequired = errors.New(""Copy argument must be a pointer when Lock is true"") -+ -+type Config struct { -+ // Lock any types that are a sync.Locker and are not a mutex while copying. -+ // If there is an RLocker method, use that to get the sync.Locker. -+ Lock bool -+ -+ // Copiers is a map of types associated with a CopierFunc. Use the global -+ // Copiers map if this is nil. -+ Copiers map[reflect.Type]CopierFunc -+} -+ -+func (c Config) Copy(v interface{}) (interface{}, error) { -+ if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { -+ return nil, errPointerRequired -+ } -+ -+ w := new(walker) -+ if c.Lock { -+ w.useLocks = true -+ } -+ -+ if c.Copiers == nil { -+ c.Copiers = Copiers -+ } -+ -+ err := reflectwalk.Walk(v, w) -+ if err != nil { -+ return nil, err -+ } -+ -+ // Get the result. If the result is nil, then we want to turn it -+ // into a typed nil if we can. -+ result := w.Result -+ if result == nil { -+ val := reflect.ValueOf(v) -+ result = reflect.Indirect(reflect.New(val.Type())).Interface() -+ } -+ -+ return result, nil -+} -+ -+// Return the key used to index interfaces types we've seen. Store the number -+// of pointers in the upper 32bits, and the depth in the lower 32bits. This is -+// easy to calculate, easy to match a key with our current depth, and we don't -+// need to deal with initializing and cleaning up nested maps or slices. -+func ifaceKey(pointers, depth int) uint64 { -+ return uint64(pointers)<<32 | uint64(depth) -+} -+ -+type walker struct { -+ Result interface{} -+ -+ depth int -+ ignoreDepth int -+ vals []reflect.Value -+ cs []reflect.Value -+ -+ // This stores the number of pointers we've walked over, indexed by depth. -+ ps []int -+ -+ // If an interface is indirected by a pointer, we need to know the type of -+ // interface to create when creating the new value. Store the interface -+ // types here, indexed by both the walk depth and the number of pointers -+ // already seen at that depth. Use ifaceKey to calculate the proper uint64 -+ // value. 
-+ ifaceTypes map[uint64]reflect.Type -+ -+ // any locks we've taken, indexed by depth -+ locks []sync.Locker -+ // take locks while walking the structure -+ useLocks bool -+} -+ -+func (w *walker) Enter(l reflectwalk.Location) error { -+ w.depth++ -+ -+ // ensure we have enough elements to index via w.depth -+ for w.depth >= len(w.locks) { -+ w.locks = append(w.locks, nil) -+ } -+ -+ for len(w.ps) < w.depth+1 { -+ w.ps = append(w.ps, 0) -+ } -+ -+ return nil -+} -+ -+func (w *walker) Exit(l reflectwalk.Location) error { -+ locker := w.locks[w.depth] -+ w.locks[w.depth] = nil -+ if locker != nil { -+ defer locker.Unlock() -+ } -+ -+ // clear out pointers and interfaces as we exit the stack -+ w.ps[w.depth] = 0 -+ -+ for k := range w.ifaceTypes { -+ mask := uint64(^uint32(0)) -+ if k&mask == uint64(w.depth) { -+ delete(w.ifaceTypes, k) -+ } -+ } -+ -+ w.depth-- -+ if w.ignoreDepth > w.depth { -+ w.ignoreDepth = 0 -+ } -+ -+ if w.ignoring() { -+ return nil -+ } -+ -+ switch l { -+ case reflectwalk.Array: -+ fallthrough -+ case reflectwalk.Map: -+ fallthrough -+ case reflectwalk.Slice: -+ w.replacePointerMaybe() -+ -+ // Pop map off our container -+ w.cs = w.cs[:len(w.cs)-1] -+ case reflectwalk.MapValue: -+ // Pop off the key and value -+ mv := w.valPop() -+ mk := w.valPop() -+ m := w.cs[len(w.cs)-1] -+ -+ // If mv is the zero value, SetMapIndex deletes the key form the map, -+ // or in this case never adds it. We need to create a properly typed -+ // zero value so that this key can be set. -+ if !mv.IsValid() { -+ mv = reflect.Zero(m.Elem().Type().Elem()) -+ } -+ m.Elem().SetMapIndex(mk, mv) -+ case reflectwalk.ArrayElem: -+ // Pop off the value and the index and set it on the array -+ v := w.valPop() -+ i := w.valPop().Interface().(int) -+ if v.IsValid() { -+ a := w.cs[len(w.cs)-1] -+ ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call -+ if ae.CanSet() { -+ ae.Set(v) -+ } -+ } -+ case reflectwalk.SliceElem: -+ // Pop off the value and the index and set it on the slice -+ v := w.valPop() -+ i := w.valPop().Interface().(int) -+ if v.IsValid() { -+ s := w.cs[len(w.cs)-1] -+ se := s.Elem().Index(i) -+ if se.CanSet() { -+ se.Set(v) -+ } -+ } -+ case reflectwalk.Struct: -+ w.replacePointerMaybe() -+ -+ // Remove the struct from the container stack -+ w.cs = w.cs[:len(w.cs)-1] -+ case reflectwalk.StructField: -+ // Pop off the value and the field -+ v := w.valPop() -+ f := w.valPop().Interface().(reflect.StructField) -+ if v.IsValid() { -+ s := w.cs[len(w.cs)-1] -+ sf := reflect.Indirect(s).FieldByName(f.Name) -+ -+ if sf.CanSet() { -+ sf.Set(v) -+ } -+ } -+ case reflectwalk.WalkLoc: -+ // Clear out the slices for GC -+ w.cs = nil -+ w.vals = nil -+ } -+ -+ return nil -+} -+ -+func (w *walker) Map(m reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ w.lock(m) -+ -+ // Create the map. 
If the map itself is nil, then just make a nil map -+ var newMap reflect.Value -+ if m.IsNil() { -+ newMap = reflect.New(m.Type()) -+ } else { -+ newMap = wrapPtr(reflect.MakeMap(m.Type())) -+ } -+ -+ w.cs = append(w.cs, newMap) -+ w.valPush(newMap) -+ return nil -+} -+ -+func (w *walker) MapElem(m, k, v reflect.Value) error { -+ return nil -+} -+ -+func (w *walker) PointerEnter(v bool) error { -+ if v { -+ w.ps[w.depth]++ -+ } -+ return nil -+} -+ -+func (w *walker) PointerExit(v bool) error { -+ if v { -+ w.ps[w.depth]-- -+ } -+ return nil -+} -+ -+func (w *walker) Interface(v reflect.Value) error { -+ if !v.IsValid() { -+ return nil -+ } -+ if w.ifaceTypes == nil { -+ w.ifaceTypes = make(map[uint64]reflect.Type) -+ } -+ -+ w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() -+ return nil -+} -+ -+func (w *walker) Primitive(v reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ w.lock(v) -+ -+ // IsValid verifies the v is non-zero and CanInterface verifies -+ // that we're allowed to read this value (unexported fields). -+ var newV reflect.Value -+ if v.IsValid() && v.CanInterface() { -+ newV = reflect.New(v.Type()) -+ newV.Elem().Set(v) -+ } -+ -+ w.valPush(newV) -+ w.replacePointerMaybe() -+ return nil -+} -+ -+func (w *walker) Slice(s reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ w.lock(s) -+ -+ var newS reflect.Value -+ if s.IsNil() { -+ newS = reflect.New(s.Type()) -+ } else { -+ newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) -+ } -+ -+ w.cs = append(w.cs, newS) -+ w.valPush(newS) -+ return nil -+} -+ -+func (w *walker) SliceElem(i int, elem reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ -+ // We don't write the slice here because elem might still be -+ // arbitrarily complex. Just record the index and continue on. -+ w.valPush(reflect.ValueOf(i)) -+ -+ return nil -+} -+ -+func (w *walker) Array(a reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ w.lock(a) -+ -+ newA := reflect.New(a.Type()) -+ -+ w.cs = append(w.cs, newA) -+ w.valPush(newA) -+ return nil -+} -+ -+func (w *walker) ArrayElem(i int, elem reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ -+ // We don't write the array here because elem might still be -+ // arbitrarily complex. Just record the index and continue on. -+ w.valPush(reflect.ValueOf(i)) -+ -+ return nil -+} -+ -+func (w *walker) Struct(s reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ w.lock(s) -+ -+ var v reflect.Value -+ if c, ok := Copiers[s.Type()]; ok { -+ // We have a Copier for this struct, so we use that copier to -+ // get the copy, and we ignore anything deeper than this. -+ w.ignoreDepth = w.depth -+ -+ dup, err := c(s.Interface()) -+ if err != nil { -+ return err -+ } -+ -+ // We need to put a pointer to the value on the value stack, -+ // so allocate a new pointer and set it. -+ v = reflect.New(s.Type()) -+ reflect.Indirect(v).Set(reflect.ValueOf(dup)) -+ } else { -+ // No copier, we copy ourselves and allow reflectwalk to guide -+ // us deeper into the structure for copying. -+ v = reflect.New(s.Type()) -+ } -+ -+ // Push the value onto the value stack for setting the struct field, -+ // and add the struct itself to the containers stack in case we walk -+ // deeper so that its own fields can be modified. 
-+ w.valPush(v) -+ w.cs = append(w.cs, v) -+ -+ return nil -+} -+ -+func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { -+ if w.ignoring() { -+ return nil -+ } -+ -+ // If PkgPath is non-empty, this is a private (unexported) field. -+ // We do not set this unexported since the Go runtime doesn't allow us. -+ if f.PkgPath != """" { -+ return reflectwalk.SkipEntry -+ } -+ -+ // Push the field onto the stack, we'll handle it when we exit -+ // the struct field in Exit... -+ w.valPush(reflect.ValueOf(f)) -+ return nil -+} -+ -+// ignore causes the walker to ignore any more values until we exit this on -+func (w *walker) ignore() { -+ w.ignoreDepth = w.depth -+} -+ -+func (w *walker) ignoring() bool { -+ return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth -+} -+ -+func (w *walker) pointerPeek() bool { -+ return w.ps[w.depth] > 0 -+} -+ -+func (w *walker) valPop() reflect.Value { -+ result := w.vals[len(w.vals)-1] -+ w.vals = w.vals[:len(w.vals)-1] -+ -+ // If we're out of values, that means we popped everything off. In -+ // this case, we reset the result so the next pushed value becomes -+ // the result. -+ if len(w.vals) == 0 { -+ w.Result = nil -+ } -+ -+ return result -+} -+ -+func (w *walker) valPush(v reflect.Value) { -+ w.vals = append(w.vals, v) -+ -+ // If we haven't set the result yet, then this is the result since -+ // it is the first (outermost) value we're seeing. -+ if w.Result == nil && v.IsValid() { -+ w.Result = v.Interface() -+ } -+} -+ -+func (w *walker) replacePointerMaybe() { -+ // Determine the last pointer value. If it is NOT a pointer, then -+ // we need to push that onto the stack. -+ if !w.pointerPeek() { -+ w.valPush(reflect.Indirect(w.valPop())) -+ return -+ } -+ -+ v := w.valPop() -+ -+ // If the expected type is a pointer to an interface of any depth, -+ // such as *interface{}, **interface{}, etc., then we need to convert -+ // the value ""v"" from *CONCRETE to *interface{} so types match for -+ // Set. -+ // -+ // Example if v is type *Foo where Foo is a struct, v would become -+ // *interface{} instead. This only happens if we have an interface expectation -+ // at this depth. -+ // -+ // For more info, see GH-16 -+ if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { -+ y := reflect.New(iType) // Create *interface{} -+ y.Elem().Set(reflect.Indirect(v)) // Assign ""Foo"" to interface{} (dereferenced) -+ v = y // v is now typed *interface{} (where *v = Foo) -+ } -+ -+ for i := 1; i < w.ps[w.depth]; i++ { -+ if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { -+ iface := reflect.New(iType).Elem() -+ iface.Set(v) -+ v = iface -+ } -+ -+ p := reflect.New(v.Type()) -+ p.Elem().Set(v) -+ v = p -+ } -+ -+ w.valPush(v) -+} -+ -+// if this value is a Locker, lock it and add it to the locks slice -+func (w *walker) lock(v reflect.Value) { -+ if !w.useLocks { -+ return -+ } -+ -+ if !v.IsValid() || !v.CanInterface() { -+ return -+ } -+ -+ type rlocker interface { -+ RLocker() sync.Locker -+ } -+ -+ var locker sync.Locker -+ -+ // We can't call Interface() on a value directly, since that requires -+ // a copy. This is OK, since the pointer to a value which is a sync.Locker -+ // is also a sync.Locker. 
-+ if v.Kind() == reflect.Ptr { -+ switch l := v.Interface().(type) { -+ case rlocker: -+ // don't lock a mutex directly -+ if _, ok := l.(*sync.RWMutex); !ok { -+ locker = l.RLocker() -+ } -+ case sync.Locker: -+ locker = l -+ } -+ } else if v.CanAddr() { -+ switch l := v.Addr().Interface().(type) { -+ case rlocker: -+ // don't lock a mutex directly -+ if _, ok := l.(*sync.RWMutex); !ok { -+ locker = l.RLocker() -+ } -+ case sync.Locker: -+ locker = l -+ } -+ } -+ -+ // still no callable locker -+ if locker == nil { -+ return -+ } -+ -+ // don't lock a mutex directly -+ switch locker.(type) { -+ case *sync.Mutex, *sync.RWMutex: -+ return -+ } -+ -+ locker.Lock() -+ w.locks[w.depth] = locker -+} -+ -+// wrapPtr is a helper that takes v and always make it *v. copystructure -+// stores things internally as pointers until the last moment before unwrapping -+func wrapPtr(v reflect.Value) reflect.Value { -+ if !v.IsValid() { -+ return v -+ } -+ vPtr := reflect.New(v.Type()) -+ vPtr.Elem().Set(v) -+ return vPtr -+} -diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod -new file mode 100644 -index 0000000000000..d01864309b403 ---- /dev/null -+++ b/vendor/github.com/mitchellh/copystructure/go.mod -@@ -0,0 +1,3 @@ -+module github.com/mitchellh/copystructure -+ -+require github.com/mitchellh/reflectwalk v1.0.0 -diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum -new file mode 100644 -index 0000000000000..be572456190a8 ---- /dev/null -+++ b/vendor/github.com/mitchellh/copystructure/go.sum -@@ -0,0 +1,2 @@ -+github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml -new file mode 100644 -index 0000000000000..4f2ee4d973389 ---- /dev/null -+++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml -@@ -0,0 +1 @@ -+language: go -diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE -new file mode 100644 -index 0000000000000..f9c841a51e0d1 ---- /dev/null -+++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE -@@ -0,0 +1,21 @@ -+The MIT License (MIT) -+ -+Copyright (c) 2013 Mitchell Hashimoto -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the ""Software""), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. 
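A minimal sketch of how the copystructure walker above is normally driven, through `copystructure.Copy`: the walker rebuilds every map, slice and pointer it visits, so the returned value shares no mutable state with the original. The `Node` type and values are illustrative assumptions; only the `Copy` call and its `(interface{}, error)` signature come from the vendored package.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

// Node is a made-up type with reference fields (slice, map, pointer)
// to show that Copy duplicates them rather than aliasing them.
type Node struct {
	Name     string
	Children []*Node
	Meta     map[string]string
}

func main() {
	orig := &Node{
		Name:     "root",
		Children: []*Node{{Name: "child"}},
		Meta:     map[string]string{"k": "v"},
	}

	dupIface, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}
	dup := dupIface.(*Node) // Copy preserves the input's (pointer) type.

	// Mutating the copy leaves the original untouched.
	dup.Children[0].Name = "changed"
	dup.Meta["k"] = "changed"
	fmt.Println(orig.Children[0].Name, orig.Meta["k"]) // child v
}
```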
-diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md -new file mode 100644 -index 0000000000000..ac82cd2e159fb ---- /dev/null -+++ b/vendor/github.com/mitchellh/reflectwalk/README.md -@@ -0,0 +1,6 @@ -+# reflectwalk -+ -+reflectwalk is a Go library for ""walking"" a value in Go using reflection, -+in the same way a directory tree can be ""walked"" on the filesystem. Walking -+a complex structure can allow you to do manipulations on unknown structures -+such as those decoded from JSON. -diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod -new file mode 100644 -index 0000000000000..52bb7c469e933 ---- /dev/null -+++ b/vendor/github.com/mitchellh/reflectwalk/go.mod -@@ -0,0 +1 @@ -+module github.com/mitchellh/reflectwalk -diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go -new file mode 100644 -index 0000000000000..6a7f176117f91 ---- /dev/null -+++ b/vendor/github.com/mitchellh/reflectwalk/location.go -@@ -0,0 +1,19 @@ -+package reflectwalk -+ -+//go:generate stringer -type=Location location.go -+ -+type Location uint -+ -+const ( -+ None Location = iota -+ Map -+ MapKey -+ MapValue -+ Slice -+ SliceElem -+ Array -+ ArrayElem -+ Struct -+ StructField -+ WalkLoc -+) -diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go -new file mode 100644 -index 0000000000000..70760cf4c705f ---- /dev/null -+++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go -@@ -0,0 +1,16 @@ -+// Code generated by ""stringer -type=Location location.go""; DO NOT EDIT. -+ -+package reflectwalk -+ -+import ""fmt"" -+ -+const _Location_name = ""NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"" -+ -+var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} -+ -+func (i Location) String() string { -+ if i >= Location(len(_Location_index)-1) { -+ return fmt.Sprintf(""Location(%d)"", i) -+ } -+ return _Location_name[_Location_index[i]:_Location_index[i+1]] -+} -diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go -new file mode 100644 -index 0000000000000..3a93a0b114d49 ---- /dev/null -+++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go -@@ -0,0 +1,402 @@ -+// reflectwalk is a package that allows you to ""walk"" complex structures -+// similar to how you may ""walk"" a filesystem: visiting every element one -+// by one and calling callback functions allowing you to handle and manipulate -+// those elements. -+package reflectwalk -+ -+import ( -+ ""errors"" -+ ""reflect"" -+) -+ -+// PrimitiveWalker implementations are able to handle primitive values -+// within complex structures. Primitive values are numbers, strings, -+// booleans, funcs, chans. -+// -+// These primitive values are often members of more complex -+// structures (slices, maps, etc.) that are walkable by other interfaces. -+type PrimitiveWalker interface { -+ Primitive(reflect.Value) error -+} -+ -+// InterfaceWalker implementations are able to handle interface values as they -+// are encountered during the walk. -+type InterfaceWalker interface { -+ Interface(reflect.Value) error -+} -+ -+// MapWalker implementations are able to handle individual elements -+// found within a map structure. 
-+type MapWalker interface { -+ Map(m reflect.Value) error -+ MapElem(m, k, v reflect.Value) error -+} -+ -+// SliceWalker implementations are able to handle slice elements found -+// within complex structures. -+type SliceWalker interface { -+ Slice(reflect.Value) error -+ SliceElem(int, reflect.Value) error -+} -+ -+// ArrayWalker implementations are able to handle array elements found -+// within complex structures. -+type ArrayWalker interface { -+ Array(reflect.Value) error -+ ArrayElem(int, reflect.Value) error -+} -+ -+// StructWalker is an interface that has methods that are called for -+// structs when a Walk is done. -+type StructWalker interface { -+ Struct(reflect.Value) error -+ StructField(reflect.StructField, reflect.Value) error -+} -+ -+// EnterExitWalker implementations are notified before and after -+// they walk deeper into complex structures (into struct fields, -+// into slice elements, etc.) -+type EnterExitWalker interface { -+ Enter(Location) error -+ Exit(Location) error -+} -+ -+// PointerWalker implementations are notified when the value they're -+// walking is a pointer or not. Pointer is called for _every_ value whether -+// it is a pointer or not. -+type PointerWalker interface { -+ PointerEnter(bool) error -+ PointerExit(bool) error -+} -+ -+// SkipEntry can be returned from walk functions to skip walking -+// the value of this field. This is only valid in the following functions: -+// -+// - Struct: skips all fields from being walked -+// - StructField: skips walking the struct value -+// -+var SkipEntry = errors.New(""skip this entry"") -+ -+// Walk takes an arbitrary value and an interface and traverses the -+// value, calling callbacks on the interface if they are supported. -+// The interface should implement one or more of the walker interfaces -+// in this package, such as PrimitiveWalker, StructWalker, etc. -+func Walk(data, walker interface{}) (err error) { -+ v := reflect.ValueOf(data) -+ ew, ok := walker.(EnterExitWalker) -+ if ok { -+ err = ew.Enter(WalkLoc) -+ } -+ -+ if err == nil { -+ err = walk(v, walker) -+ } -+ -+ if ok && err == nil { -+ err = ew.Exit(WalkLoc) -+ } -+ -+ return -+} -+ -+func walk(v reflect.Value, w interface{}) (err error) { -+ // Determine if we're receiving a pointer and if so notify the walker. -+ // The logic here is convoluted but very important (tests will fail if -+ // almost any part is changed). I will try to explain here. -+ // -+ // First, we check if the value is an interface, if so, we really need -+ // to check the interface's VALUE to see whether it is a pointer. -+ // -+ // Check whether the value is then a pointer. If so, then set pointer -+ // to true to notify the user. -+ // -+ // If we still have a pointer or an interface after the indirections, then -+ // we unwrap another level -+ // -+ // At this time, we also set ""v"" to be the dereferenced value. This is -+ // because once we've unwrapped the pointer we want to use that value. 
-+ pointer := false -+ pointerV := v -+ -+ for { -+ if pointerV.Kind() == reflect.Interface { -+ if iw, ok := w.(InterfaceWalker); ok { -+ if err = iw.Interface(pointerV); err != nil { -+ return -+ } -+ } -+ -+ pointerV = pointerV.Elem() -+ } -+ -+ if pointerV.Kind() == reflect.Ptr { -+ pointer = true -+ v = reflect.Indirect(pointerV) -+ } -+ if pw, ok := w.(PointerWalker); ok { -+ if err = pw.PointerEnter(pointer); err != nil { -+ return -+ } -+ -+ defer func(pointer bool) { -+ if err != nil { -+ return -+ } -+ -+ err = pw.PointerExit(pointer) -+ }(pointer) -+ } -+ -+ if pointer { -+ pointerV = v -+ } -+ pointer = false -+ -+ // If we still have a pointer or interface we have to indirect another level. -+ switch pointerV.Kind() { -+ case reflect.Ptr, reflect.Interface: -+ continue -+ } -+ break -+ } -+ -+ // We preserve the original value here because if it is an interface -+ // type, we want to pass that directly into the walkPrimitive, so that -+ // we can set it. -+ originalV := v -+ if v.Kind() == reflect.Interface { -+ v = v.Elem() -+ } -+ -+ k := v.Kind() -+ if k >= reflect.Int && k <= reflect.Complex128 { -+ k = reflect.Int -+ } -+ -+ switch k { -+ // Primitives -+ case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: -+ err = walkPrimitive(originalV, w) -+ return -+ case reflect.Map: -+ err = walkMap(v, w) -+ return -+ case reflect.Slice: -+ err = walkSlice(v, w) -+ return -+ case reflect.Struct: -+ err = walkStruct(v, w) -+ return -+ case reflect.Array: -+ err = walkArray(v, w) -+ return -+ default: -+ panic(""unsupported type: "" + k.String()) -+ } -+} -+ -+func walkMap(v reflect.Value, w interface{}) error { -+ ew, ewok := w.(EnterExitWalker) -+ if ewok { -+ ew.Enter(Map) -+ } -+ -+ if mw, ok := w.(MapWalker); ok { -+ if err := mw.Map(v); err != nil { -+ return err -+ } -+ } -+ -+ for _, k := range v.MapKeys() { -+ kv := v.MapIndex(k) -+ -+ if mw, ok := w.(MapWalker); ok { -+ if err := mw.MapElem(v, k, kv); err != nil { -+ return err -+ } -+ } -+ -+ ew, ok := w.(EnterExitWalker) -+ if ok { -+ ew.Enter(MapKey) -+ } -+ -+ if err := walk(k, w); err != nil { -+ return err -+ } -+ -+ if ok { -+ ew.Exit(MapKey) -+ ew.Enter(MapValue) -+ } -+ -+ // get the map value again as it may have changed in the MapElem call -+ if err := walk(v.MapIndex(k), w); err != nil { -+ return err -+ } -+ -+ if ok { -+ ew.Exit(MapValue) -+ } -+ } -+ -+ if ewok { -+ ew.Exit(Map) -+ } -+ -+ return nil -+} -+ -+func walkPrimitive(v reflect.Value, w interface{}) error { -+ if pw, ok := w.(PrimitiveWalker); ok { -+ return pw.Primitive(v) -+ } -+ -+ return nil -+} -+ -+func walkSlice(v reflect.Value, w interface{}) (err error) { -+ ew, ok := w.(EnterExitWalker) -+ if ok { -+ ew.Enter(Slice) -+ } -+ -+ if sw, ok := w.(SliceWalker); ok { -+ if err := sw.Slice(v); err != nil { -+ return err -+ } -+ } -+ -+ for i := 0; i < v.Len(); i++ { -+ elem := v.Index(i) -+ -+ if sw, ok := w.(SliceWalker); ok { -+ if err := sw.SliceElem(i, elem); err != nil { -+ return err -+ } -+ } -+ -+ ew, ok := w.(EnterExitWalker) -+ if ok { -+ ew.Enter(SliceElem) -+ } -+ -+ if err := walk(elem, w); err != nil { -+ return err -+ } -+ -+ if ok { -+ ew.Exit(SliceElem) -+ } -+ } -+ -+ ew, ok = w.(EnterExitWalker) -+ if ok { -+ ew.Exit(Slice) -+ } -+ -+ return nil -+} -+ -+func walkArray(v reflect.Value, w interface{}) (err error) { -+ ew, ok := w.(EnterExitWalker) -+ if ok { -+ ew.Enter(Array) -+ } -+ -+ if aw, ok := w.(ArrayWalker); ok { -+ if err := aw.Array(v); err != nil { -+ return err -+ } -+ } -+ 
-+ for i := 0; i < v.Len(); i++ { -+ elem := v.Index(i) -+ -+ if aw, ok := w.(ArrayWalker); ok { -+ if err := aw.ArrayElem(i, elem); err != nil { -+ return err -+ } -+ } -+ -+ ew, ok := w.(EnterExitWalker) -+ if ok { -+ ew.Enter(ArrayElem) -+ } -+ -+ if err := walk(elem, w); err != nil { -+ return err -+ } -+ -+ if ok { -+ ew.Exit(ArrayElem) -+ } -+ } -+ -+ ew, ok = w.(EnterExitWalker) -+ if ok { -+ ew.Exit(Array) -+ } -+ -+ return nil -+} -+ -+func walkStruct(v reflect.Value, w interface{}) (err error) { -+ ew, ewok := w.(EnterExitWalker) -+ if ewok { -+ ew.Enter(Struct) -+ } -+ -+ skip := false -+ if sw, ok := w.(StructWalker); ok { -+ err = sw.Struct(v) -+ if err == SkipEntry { -+ skip = true -+ err = nil -+ } -+ if err != nil { -+ return -+ } -+ } -+ -+ if !skip { -+ vt := v.Type() -+ for i := 0; i < vt.NumField(); i++ { -+ sf := vt.Field(i) -+ f := v.FieldByIndex([]int{i}) -+ -+ if sw, ok := w.(StructWalker); ok { -+ err = sw.StructField(sf, f) -+ -+ // SkipEntry just pretends this field doesn't even exist -+ if err == SkipEntry { -+ continue -+ } -+ -+ if err != nil { -+ return -+ } -+ } -+ -+ ew, ok := w.(EnterExitWalker) -+ if ok { -+ ew.Enter(StructField) -+ } -+ -+ err = walk(f, w) -+ if err != nil { -+ return -+ } -+ -+ if ok { -+ ew.Exit(StructField) -+ } -+ } -+ } -+ -+ if ewok { -+ ew.Exit(Struct) -+ } -+ -+ return nil -+} -diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore -new file mode 100644 -index 0000000000000..8a43ce9d7b6b6 ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/.gitignore -@@ -0,0 +1,6 @@ -+.git -+*.swp -+ -+# IntelliJ -+.idea/ -+*.iml -diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml -new file mode 100644 -index 0000000000000..55d42b289d09f ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/.travis.yml -@@ -0,0 +1,13 @@ -+language: go -+ -+go: -+ - 1.7.x -+ - 1.12.x -+ - 1.13.x -+ - tip -+ -+install: -+ - go build . 
-+ -+script: -+ - go test -v -diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md -new file mode 100644 -index 0000000000000..01ba02feb2c7b ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md -@@ -0,0 +1,19 @@ -+## Decimal v1.2.0 -+ -+#### BREAKING -+- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) -+ -+#### FEATURES -+- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) -+- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) -+- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) -+ -+#### ENHANCEMENTS -+- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) -+- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) -+- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) -+- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) -+ -+#### BUGFIXES -+- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) -+- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) -diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE -new file mode 100644 -index 0000000000000..ad2148aaf93e3 ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/LICENSE -@@ -0,0 +1,45 @@ -+The MIT License (MIT) -+ -+Copyright (c) 2015 Spring, Inc. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the ""Software""), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -+ -+- Based on https://github.com/oguzbilgic/fpd, which has the following license: -+"""""" -+The MIT License (MIT) -+ -+Copyright (c) 2013 Oguz Bilgic -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy of -+this software and associated documentation files (the ""Software""), to deal in -+the Software without restriction, including without limitation the rights to -+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -+the Software, and to permit persons to whom the Software is furnished to do so, -+subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in all -+copies or substantial portions of the Software. 
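A minimal sketch of walking a value with the reflectwalk package vendored above: a caller supplies a type implementing one or more of the walker interfaces (here `PrimitiveWalker` and `StructWalker`) and passes it to `reflectwalk.Walk`. The `countWalker` type and the sample structs are illustrative assumptions; the interfaces and the `Walk` signature are taken from the diff.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// countWalker counts the structs and primitive values visited during a walk.
type countWalker struct {
	structs    int
	primitives int
}

func (w *countWalker) Primitive(v reflect.Value) error { w.primitives++; return nil }
func (w *countWalker) Struct(v reflect.Value) error    { w.structs++; return nil }
func (w *countWalker) StructField(f reflect.StructField, v reflect.Value) error {
	return nil // returning reflectwalk.SkipEntry here would skip the field
}

func main() {
	type Point struct{ X, Y int }
	type Shape struct {
		Name   string
		Points []Point
	}

	w := &countWalker{}
	if err := reflectwalk.Walk(Shape{Name: "tri", Points: make([]Point, 3)}, w); err != nil {
		panic(err)
	}
	fmt.Println(w.structs, "structs,", w.primitives, "primitives")
}
```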
-+ -+THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+"""""" -diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md -new file mode 100644 -index 0000000000000..b70f901593517 ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/README.md -@@ -0,0 +1,130 @@ -+# decimal -+ -+[![Build Status](https://travis-ci.org/shopspring/decimal.png?branch=master)](https://travis-ci.org/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) -+ -+Arbitrary-precision fixed-point decimal numbers in go. -+ -+_Note:_ Decimal library can ""only"" represent numbers with a maximum of 2^31 digits after the decimal point. -+ -+## Features -+ -+ * The zero-value is 0, and is safe to use without initialization -+ * Addition, subtraction, multiplication with no loss of precision -+ * Division with specified precision -+ * Database/sql serialization/deserialization -+ * JSON and XML serialization/deserialization -+ -+## Install -+ -+Run `go get github.com/shopspring/decimal` -+ -+## Requirements -+ -+Decimal library requires Go version `>=1.7` -+ -+## Usage -+ -+```go -+package main -+ -+import ( -+ ""fmt"" -+ ""github.com/shopspring/decimal"" -+) -+ -+func main() { -+ price, err := decimal.NewFromString(""136.02"") -+ if err != nil { -+ panic(err) -+ } -+ -+ quantity := decimal.NewFromInt(3) -+ -+ fee, _ := decimal.NewFromString("".035"") -+ taxRate, _ := decimal.NewFromString("".08875"") -+ -+ subtotal := price.Mul(quantity) -+ -+ preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) -+ -+ total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) -+ -+ fmt.Println(""Subtotal:"", subtotal) // Subtotal: 408.06 -+ fmt.Println(""Pre-tax:"", preTax) // Pre-tax: 422.3421 -+ fmt.Println(""Taxes:"", total.Sub(preTax)) // Taxes: 37.482861375 -+ fmt.Println(""Total:"", total) // Total: 459.824961375 -+ fmt.Println(""Tax rate:"", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 -+} -+``` -+ -+## Documentation -+ -+http://godoc.org/github.com/shopspring/decimal -+ -+## Production Usage -+ -+* [Spring](https://shopspring.com/), since August 14, 2014. -+* If you are using this in production, please let us know! -+ -+## FAQ -+ -+#### Why don't you just use float64? -+ -+Because float64 (or any binary floating point type, actually) can't represent -+numbers such as `0.1` exactly. -+ -+Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that -+it prints out `10`, but it actually prints `9.999999999999831`. Over time, -+these small errors can really add up! -+ -+#### Why don't you just use big.Rat? -+ -+big.Rat is fine for representing rational numbers, but Decimal is better for -+representing money. Why? Here's a (contrived) example: -+ -+Let's say you use big.Rat, and you have two numbers, x and y, both -+representing 1/3, and you have `z = 1 - x - y = 1/3`. 
If you print each one -+out, the string output has to stop somewhere (let's say it stops at 3 decimal -+digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did -+the other 0.001 go? -+ -+Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE -+ -+With Decimal, the strings being printed out represent the number exactly. So, -+if you have `x = y = 1/3` (with precision 3), they will actually be equal to -+0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is -+unaccounted for! -+ -+You still have to be careful. If you want to split a number `N` 3 ways, you -+can't just send `N/3` to three different people. You have to pick one to send -+`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. -+ -+But, it is much easier to be careful with Decimal than with big.Rat. -+ -+#### Why isn't the API similar to big.Int's? -+ -+big.Int's API is built to reduce the number of memory allocations for maximal -+performance. This makes sense for its use-case, but the trade-off is that the -+API is awkward and easy to misuse. -+ -+For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A -+developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This -+modifies `a` and sets `z` as an alias for `a`, which they might not expect. It -+also modifies any other aliases to `a`. -+ -+Here's an example of the subtle bugs you can introduce with big.Int's API: -+https://play.golang.org/p/x2R_78pa8r -+ -+In contrast, it's difficult to make such mistakes with decimal. Decimals -+behave like other go numbers types: even though `a = b` will not deep copy -+`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods -+return new Decimals and do not modify the originals. The downside is that -+this causes extra allocations, so Decimal is less performant. My assumption -+is that if you're using Decimals, you probably care more about correctness -+than performance. -+ -+## License -+ -+The MIT License (MIT) -+ -+This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. -diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go -new file mode 100644 -index 0000000000000..9958d6902063f ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/decimal-go.go -@@ -0,0 +1,415 @@ -+// Copyright 2009 The Go Authors. All rights reserved. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. -+ -+// Multiprecision decimal numbers. -+// For floating-point formatting only; not general purpose. -+// Only operations are assign and (binary) left/right shift. -+// Can do binary floating point in multiprecision decimal precisely -+// because 2 divides 10; cannot do decimal floating point -+// in multiprecision binary precisely. -+ -+package decimal -+ -+type decimal struct { -+ d [800]byte // digits, big-endian representation -+ nd int // number of digits used -+ dp int // decimal point -+ neg bool // negative flag -+ trunc bool // discarded nonzero digits beyond d[:nd] -+} -+ -+func (a *decimal) String() string { -+ n := 10 + a.nd -+ if a.dp > 0 { -+ n += a.dp -+ } -+ if a.dp < 0 { -+ n += -a.dp -+ } -+ -+ buf := make([]byte, n) -+ w := 0 -+ switch { -+ case a.nd == 0: -+ return ""0"" -+ -+ case a.dp <= 0: -+ // zeros fill space between decimal point and digits -+ buf[w] = '0' -+ w++ -+ buf[w] = '.' 
-+ w++ -+ w += digitZero(buf[w : w+-a.dp]) -+ w += copy(buf[w:], a.d[0:a.nd]) -+ -+ case a.dp < a.nd: -+ // decimal point in middle of digits -+ w += copy(buf[w:], a.d[0:a.dp]) -+ buf[w] = '.' -+ w++ -+ w += copy(buf[w:], a.d[a.dp:a.nd]) -+ -+ default: -+ // zeros fill space between digits and decimal point -+ w += copy(buf[w:], a.d[0:a.nd]) -+ w += digitZero(buf[w : w+a.dp-a.nd]) -+ } -+ return string(buf[0:w]) -+} -+ -+func digitZero(dst []byte) int { -+ for i := range dst { -+ dst[i] = '0' -+ } -+ return len(dst) -+} -+ -+// trim trailing zeros from number. -+// (They are meaningless; the decimal point is tracked -+// independent of the number of digits.) -+func trim(a *decimal) { -+ for a.nd > 0 && a.d[a.nd-1] == '0' { -+ a.nd-- -+ } -+ if a.nd == 0 { -+ a.dp = 0 -+ } -+} -+ -+// Assign v to a. -+func (a *decimal) Assign(v uint64) { -+ var buf [24]byte -+ -+ // Write reversed decimal in buf. -+ n := 0 -+ for v > 0 { -+ v1 := v / 10 -+ v -= 10 * v1 -+ buf[n] = byte(v + '0') -+ n++ -+ v = v1 -+ } -+ -+ // Reverse again to produce forward decimal in a.d. -+ a.nd = 0 -+ for n--; n >= 0; n-- { -+ a.d[a.nd] = buf[n] -+ a.nd++ -+ } -+ a.dp = a.nd -+ trim(a) -+} -+ -+// Maximum shift that we can do in one pass without overflow. -+// A uint has 32 or 64 bits, and we have to be able to accommodate 9<> 63) -+const maxShift = uintSize - 4 -+ -+// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow. -+func rightShift(a *decimal, k uint) { -+ r := 0 // read pointer -+ w := 0 // write pointer -+ -+ // Pick up enough leading digits to cover first shift. -+ var n uint -+ for ; n>>k == 0; r++ { -+ if r >= a.nd { -+ if n == 0 { -+ // a == 0; shouldn't get here, but handle anyway. -+ a.nd = 0 -+ return -+ } -+ for n>>k == 0 { -+ n = n * 10 -+ r++ -+ } -+ break -+ } -+ c := uint(a.d[r]) -+ n = n*10 + c - '0' -+ } -+ a.dp -= r - 1 -+ -+ var mask uint = (1 << k) - 1 -+ -+ // Pick up a digit, put down a digit. -+ for ; r < a.nd; r++ { -+ c := uint(a.d[r]) -+ dig := n >> k -+ n &= mask -+ a.d[w] = byte(dig + '0') -+ w++ -+ n = n*10 + c - '0' -+ } -+ -+ // Put down extra digits. -+ for n > 0 { -+ dig := n >> k -+ n &= mask -+ if w < len(a.d) { -+ a.d[w] = byte(dig + '0') -+ w++ -+ } else if dig > 0 { -+ a.trunc = true -+ } -+ n = n * 10 -+ } -+ -+ a.nd = w -+ trim(a) -+} -+ -+// Cheat sheet for left shift: table indexed by shift count giving -+// number of new digits that will be introduced by that shift. -+// -+// For example, leftcheats[4] = {2, ""625""}. That means that -+// if we are shifting by 4 (multiplying by 16), it will add 2 digits -+// when the string prefix is ""625"" through ""999"", and one fewer digit -+// if the string prefix is ""000"" through ""624"". -+// -+// Credit for this trick goes to Ken. -+ -+type leftCheat struct { -+ delta int // number of new digits -+ cutoff string // minus one digit if original < a. -+} -+ -+var leftcheats = []leftCheat{ -+ // Leading digits of 1/2^i = 5^i. -+ // 5^23 is not an exact 64-bit floating point number, -+ // so have to use bc for the math. -+ // Go up to 60 to be large enough for 32bit and 64bit platforms. 
-+ /* -+ seq 60 | sed 's/^/5^/' | bc | -+ awk 'BEGIN{ print ""\t{ 0, \""\"" },"" } -+ { -+ log2 = log(2)/log(10) -+ printf(""\t{ %d, \""%s\"" },\t// * %d\n"", -+ int(log2*NR+1), $0, 2**NR) -+ }' -+ */ -+ {0, """"}, -+ {1, ""5""}, // * 2 -+ {1, ""25""}, // * 4 -+ {1, ""125""}, // * 8 -+ {2, ""625""}, // * 16 -+ {2, ""3125""}, // * 32 -+ {2, ""15625""}, // * 64 -+ {3, ""78125""}, // * 128 -+ {3, ""390625""}, // * 256 -+ {3, ""1953125""}, // * 512 -+ {4, ""9765625""}, // * 1024 -+ {4, ""48828125""}, // * 2048 -+ {4, ""244140625""}, // * 4096 -+ {4, ""1220703125""}, // * 8192 -+ {5, ""6103515625""}, // * 16384 -+ {5, ""30517578125""}, // * 32768 -+ {5, ""152587890625""}, // * 65536 -+ {6, ""762939453125""}, // * 131072 -+ {6, ""3814697265625""}, // * 262144 -+ {6, ""19073486328125""}, // * 524288 -+ {7, ""95367431640625""}, // * 1048576 -+ {7, ""476837158203125""}, // * 2097152 -+ {7, ""2384185791015625""}, // * 4194304 -+ {7, ""11920928955078125""}, // * 8388608 -+ {8, ""59604644775390625""}, // * 16777216 -+ {8, ""298023223876953125""}, // * 33554432 -+ {8, ""1490116119384765625""}, // * 67108864 -+ {9, ""7450580596923828125""}, // * 134217728 -+ {9, ""37252902984619140625""}, // * 268435456 -+ {9, ""186264514923095703125""}, // * 536870912 -+ {10, ""931322574615478515625""}, // * 1073741824 -+ {10, ""4656612873077392578125""}, // * 2147483648 -+ {10, ""23283064365386962890625""}, // * 4294967296 -+ {10, ""116415321826934814453125""}, // * 8589934592 -+ {11, ""582076609134674072265625""}, // * 17179869184 -+ {11, ""2910383045673370361328125""}, // * 34359738368 -+ {11, ""14551915228366851806640625""}, // * 68719476736 -+ {12, ""72759576141834259033203125""}, // * 137438953472 -+ {12, ""363797880709171295166015625""}, // * 274877906944 -+ {12, ""1818989403545856475830078125""}, // * 549755813888 -+ {13, ""9094947017729282379150390625""}, // * 1099511627776 -+ {13, ""45474735088646411895751953125""}, // * 2199023255552 -+ {13, ""227373675443232059478759765625""}, // * 4398046511104 -+ {13, ""1136868377216160297393798828125""}, // * 8796093022208 -+ {14, ""5684341886080801486968994140625""}, // * 17592186044416 -+ {14, ""28421709430404007434844970703125""}, // * 35184372088832 -+ {14, ""142108547152020037174224853515625""}, // * 70368744177664 -+ {15, ""710542735760100185871124267578125""}, // * 140737488355328 -+ {15, ""3552713678800500929355621337890625""}, // * 281474976710656 -+ {15, ""17763568394002504646778106689453125""}, // * 562949953421312 -+ {16, ""88817841970012523233890533447265625""}, // * 1125899906842624 -+ {16, ""444089209850062616169452667236328125""}, // * 2251799813685248 -+ {16, ""2220446049250313080847263336181640625""}, // * 4503599627370496 -+ {16, ""11102230246251565404236316680908203125""}, // * 9007199254740992 -+ {17, ""55511151231257827021181583404541015625""}, // * 18014398509481984 -+ {17, ""277555756156289135105907917022705078125""}, // * 36028797018963968 -+ {17, ""1387778780781445675529539585113525390625""}, // * 72057594037927936 -+ {18, ""6938893903907228377647697925567626953125""}, // * 144115188075855872 -+ {18, ""34694469519536141888238489627838134765625""}, // * 288230376151711744 -+ {18, ""173472347597680709441192448139190673828125""}, // * 576460752303423488 -+ {19, ""867361737988403547205962240695953369140625""}, // * 1152921504606846976 -+} -+ -+// Is the leading prefix of b lexicographically less than s? 
-+func prefixIsLessThan(b []byte, s string) bool { -+ for i := 0; i < len(s); i++ { -+ if i >= len(b) { -+ return true -+ } -+ if b[i] != s[i] { -+ return b[i] < s[i] -+ } -+ } -+ return false -+} -+ -+// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. -+func leftShift(a *decimal, k uint) { -+ delta := leftcheats[k].delta -+ if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { -+ delta-- -+ } -+ -+ r := a.nd // read index -+ w := a.nd + delta // write index -+ -+ // Pick up a digit, put down a digit. -+ var n uint -+ for r--; r >= 0; r-- { -+ n += (uint(a.d[r]) - '0') << k -+ quo := n / 10 -+ rem := n - 10*quo -+ w-- -+ if w < len(a.d) { -+ a.d[w] = byte(rem + '0') -+ } else if rem != 0 { -+ a.trunc = true -+ } -+ n = quo -+ } -+ -+ // Put down extra digits. -+ for n > 0 { -+ quo := n / 10 -+ rem := n - 10*quo -+ w-- -+ if w < len(a.d) { -+ a.d[w] = byte(rem + '0') -+ } else if rem != 0 { -+ a.trunc = true -+ } -+ n = quo -+ } -+ -+ a.nd += delta -+ if a.nd >= len(a.d) { -+ a.nd = len(a.d) -+ } -+ a.dp += delta -+ trim(a) -+} -+ -+// Binary shift left (k > 0) or right (k < 0). -+func (a *decimal) Shift(k int) { -+ switch { -+ case a.nd == 0: -+ // nothing to do: a == 0 -+ case k > 0: -+ for k > maxShift { -+ leftShift(a, maxShift) -+ k -= maxShift -+ } -+ leftShift(a, uint(k)) -+ case k < 0: -+ for k < -maxShift { -+ rightShift(a, maxShift) -+ k += maxShift -+ } -+ rightShift(a, uint(-k)) -+ } -+} -+ -+// If we chop a at nd digits, should we round up? -+func shouldRoundUp(a *decimal, nd int) bool { -+ if nd < 0 || nd >= a.nd { -+ return false -+ } -+ if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even -+ // if we truncated, a little higher than what's recorded - always round up -+ if a.trunc { -+ return true -+ } -+ return nd > 0 && (a.d[nd-1]-'0')%2 != 0 -+ } -+ // not halfway - digit tells all -+ return a.d[nd] >= '5' -+} -+ -+// Round a to nd digits (or fewer). -+// If nd is zero, it means we're rounding -+// just to the left of the digits, as in -+// 0.09 -> 0.1. -+func (a *decimal) Round(nd int) { -+ if nd < 0 || nd >= a.nd { -+ return -+ } -+ if shouldRoundUp(a, nd) { -+ a.RoundUp(nd) -+ } else { -+ a.RoundDown(nd) -+ } -+} -+ -+// Round a down to nd digits (or fewer). -+func (a *decimal) RoundDown(nd int) { -+ if nd < 0 || nd >= a.nd { -+ return -+ } -+ a.nd = nd -+ trim(a) -+} -+ -+// Round a up to nd digits (or fewer). -+func (a *decimal) RoundUp(nd int) { -+ if nd < 0 || nd >= a.nd { -+ return -+ } -+ -+ // round up -+ for i := nd - 1; i >= 0; i-- { -+ c := a.d[i] -+ if c < '9' { // can stop after this digit -+ a.d[i]++ -+ a.nd = i + 1 -+ return -+ } -+ } -+ -+ // Number is all 9s. -+ // Change to single 1 with adjusted decimal point. -+ a.d[0] = '1' -+ a.nd = 1 -+ a.dp++ -+} -+ -+// Extract integer part, rounded appropriately. -+// No guarantees about overflow. -+func (a *decimal) RoundedInteger() uint64 { -+ if a.dp > 20 { -+ return 0xFFFFFFFFFFFFFFFF -+ } -+ var i int -+ n := uint64(0) -+ for i = 0; i < a.dp && i < a.nd; i++ { -+ n = n*10 + uint64(a.d[i]-'0') -+ } -+ for ; i < a.dp; i++ { -+ n *= 10 -+ } -+ if shouldRoundUp(a, a.dp) { -+ n++ -+ } -+ return n -+} -diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go -new file mode 100644 -index 0000000000000..801c1a0457a46 ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/decimal.go -@@ -0,0 +1,1477 @@ -+// Package decimal implements an arbitrary precision fixed-point decimal. 
-+// -+// The zero-value of a Decimal is 0, as you would expect. -+// -+// The best way to create a new Decimal is to use decimal.NewFromString, ex: -+// -+// n, err := decimal.NewFromString(""-123.4567"") -+// n.String() // output: ""-123.4567"" -+// -+// To use Decimal as part of a struct: -+// -+// type Struct struct { -+// Number Decimal -+// } -+// -+// Note: This can ""only"" represent numbers with a maximum of 2^31 digits after the decimal point. -+package decimal -+ -+import ( -+ ""database/sql/driver"" -+ ""encoding/binary"" -+ ""fmt"" -+ ""math"" -+ ""math/big"" -+ ""strconv"" -+ ""strings"" -+) -+ -+// DivisionPrecision is the number of decimal places in the result when it -+// doesn't divide exactly. -+// -+// Example: -+// -+// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -+// d1.String() // output: ""0.6666666666666667"" -+// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) -+// d2.String() // output: ""0.0000666666666667"" -+// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) -+// d3.String() // output: ""6666.6666666666666667"" -+// decimal.DivisionPrecision = 3 -+// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -+// d4.String() // output: ""0.667"" -+// -+var DivisionPrecision = 16 -+ -+// MarshalJSONWithoutQuotes should be set to true if you want the decimal to -+// be JSON marshaled as a number, instead of as a string. -+// WARNING: this is dangerous for decimals with many digits, since many JSON -+// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 -+// double-precision floating point numbers, which means you can potentially -+// silently lose precision. -+var MarshalJSONWithoutQuotes = false -+ -+// Zero constant, to make computations faster. -+// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. -+var Zero = New(0, 1) -+ -+var zeroInt = big.NewInt(0) -+var oneInt = big.NewInt(1) -+var twoInt = big.NewInt(2) -+var fourInt = big.NewInt(4) -+var fiveInt = big.NewInt(5) -+var tenInt = big.NewInt(10) -+var twentyInt = big.NewInt(20) -+ -+// Decimal represents a fixed-point decimal. It is immutable. -+// number = value * 10 ^ exp -+type Decimal struct { -+ value *big.Int -+ -+ // NOTE(vadim): this must be an int32, because we cast it to float64 during -+ // calculations. If exp is 64 bit, we might lose precision. -+ // If we cared about being able to represent every possible decimal, we -+ // could make exp a *big.Int but it would hurt performance and numbers -+ // like that are unrealistic. -+ exp int32 -+} -+ -+// New returns a new fixed-point decimal, value * 10 ^ exp. -+func New(value int64, exp int32) Decimal { -+ return Decimal{ -+ value: big.NewInt(value), -+ exp: exp, -+ } -+} -+ -+// NewFromInt converts a int64 to Decimal. -+// -+// Example: -+// -+// NewFromInt(123).String() // output: ""123"" -+// NewFromInt(-10).String() // output: ""-10"" -+func NewFromInt(value int64) Decimal { -+ return Decimal{ -+ value: big.NewInt(value), -+ exp: 0, -+ } -+} -+ -+// NewFromInt32 converts a int32 to Decimal. 
-+// -+// Example: -+// -+// NewFromInt(123).String() // output: ""123"" -+// NewFromInt(-10).String() // output: ""-10"" -+func NewFromInt32(value int32) Decimal { -+ return Decimal{ -+ value: big.NewInt(int64(value)), -+ exp: 0, -+ } -+} -+ -+// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp -+func NewFromBigInt(value *big.Int, exp int32) Decimal { -+ return Decimal{ -+ value: big.NewInt(0).Set(value), -+ exp: exp, -+ } -+} -+ -+// NewFromString returns a new Decimal from a string representation. -+// Trailing zeroes are not trimmed. -+// -+// Example: -+// -+// d, err := NewFromString(""-123.45"") -+// d2, err := NewFromString("".0001"") -+// d3, err := NewFromString(""1.47000"") -+// -+func NewFromString(value string) (Decimal, error) { -+ originalInput := value -+ var intString string -+ var exp int64 -+ -+ // Check if number is using scientific notation -+ eIndex := strings.IndexAny(value, ""Ee"") -+ if eIndex != -1 { -+ expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) -+ if err != nil { -+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { -+ return Decimal{}, fmt.Errorf(""can't convert %s to decimal: fractional part too long"", value) -+ } -+ return Decimal{}, fmt.Errorf(""can't convert %s to decimal: exponent is not numeric"", value) -+ } -+ value = value[:eIndex] -+ exp = expInt -+ } -+ -+ parts := strings.Split(value, ""."") -+ if len(parts) == 1 { -+ // There is no decimal point, we can just parse the original string as -+ // an int -+ intString = value -+ } else if len(parts) == 2 { -+ intString = parts[0] + parts[1] -+ expInt := -len(parts[1]) -+ exp += int64(expInt) -+ } else { -+ return Decimal{}, fmt.Errorf(""can't convert %s to decimal: too many .s"", value) -+ } -+ -+ dValue := new(big.Int) -+ _, ok := dValue.SetString(intString, 10) -+ if !ok { -+ return Decimal{}, fmt.Errorf(""can't convert %s to decimal"", value) -+ } -+ -+ if exp < math.MinInt32 || exp > math.MaxInt32 { -+ // NOTE(vadim): I doubt a string could realistically be this long -+ return Decimal{}, fmt.Errorf(""can't convert %s to decimal: fractional part too long"", originalInput) -+ } -+ -+ return Decimal{ -+ value: dValue, -+ exp: int32(exp), -+ }, nil -+} -+ -+// RequireFromString returns a new Decimal from a string representation -+// or panics if NewFromString would have returned an error. -+// -+// Example: -+// -+// d := RequireFromString(""-123.45"") -+// d2 := RequireFromString("".0001"") -+// -+func RequireFromString(value string) Decimal { -+ dec, err := NewFromString(value) -+ if err != nil { -+ panic(err) -+ } -+ return dec -+} -+ -+// NewFromFloat converts a float64 to Decimal. -+// -+// The converted number will contain the number of significant digits that can be -+// represented in a float with reliable roundtrip. -+// This is typically 15 digits, but may be more in some cases. -+// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. -+// -+// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. -+// -+// NOTE: this will panic on NaN, +/-inf -+func NewFromFloat(value float64) Decimal { -+ if value == 0 { -+ return New(0, 0) -+ } -+ return newFromFloat(value, math.Float64bits(value), &float64info) -+} -+ -+// NewFromFloat32 converts a float32 to Decimal. -+// -+// The converted number will contain the number of significant digits that can be -+// represented in a float with reliable roundtrip. 
-+// This is typically 6-8 digits depending on the input. -+// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. -+// -+// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. -+// -+// NOTE: this will panic on NaN, +/-inf -+func NewFromFloat32(value float32) Decimal { -+ if value == 0 { -+ return New(0, 0) -+ } -+ // XOR is workaround for https://github.com/golang/go/issues/26285 -+ a := math.Float32bits(value) ^ 0x80808080 -+ return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info) -+} -+ -+func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { -+ if math.IsNaN(val) || math.IsInf(val, 0) { -+ panic(fmt.Sprintf(""Cannot create a Decimal from %v"", val)) -+ } -+ exp := int(bits>>flt.mantbits) & (1<>(flt.expbits+flt.mantbits) != 0 -+ -+ roundShortest(&d, mant, exp, flt) -+ // If less than 19 digits, we can do calculation in an int64. -+ if d.nd < 19 { -+ tmp := int64(0) -+ m := int64(1) -+ for i := d.nd - 1; i >= 0; i-- { -+ tmp += m * int64(d.d[i]-'0') -+ m *= 10 -+ } -+ if d.neg { -+ tmp *= -1 -+ } -+ return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)} -+ } -+ dValue := new(big.Int) -+ dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10) -+ if ok { -+ return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)} -+ } -+ -+ return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd)) -+} -+ -+// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary -+// number of fractional digits. -+// -+// Example: -+// -+// NewFromFloatWithExponent(123.456, -2).String() // output: ""123.46"" -+// -+func NewFromFloatWithExponent(value float64, exp int32) Decimal { -+ if math.IsNaN(value) || math.IsInf(value, 0) { -+ panic(fmt.Sprintf(""Cannot create a Decimal from %v"", value)) -+ } -+ -+ bits := math.Float64bits(value) -+ mant := bits & (1<<52 - 1) -+ exp2 := int32((bits >> 52) & (1<<11 - 1)) -+ sign := bits >> 63 -+ -+ if exp2 == 0 { -+ // specials -+ if mant == 0 { -+ return Decimal{} -+ } -+ // subnormal -+ exp2++ -+ } else { -+ // normal -+ mant |= 1 << 52 -+ } -+ -+ exp2 -= 1023 + 52 -+ -+ // normalizing base-2 values -+ for mant&1 == 0 { -+ mant = mant >> 1 -+ exp2++ -+ } -+ -+ // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0 -+ if exp < 0 && exp < exp2 { -+ if exp2 < 0 { -+ exp = exp2 -+ } else { -+ exp = 0 -+ } -+ } -+ -+ // representing 10^M * 2^N as 5^M * 2^(M+N) -+ exp2 -= exp -+ -+ temp := big.NewInt(1) -+ dMant := big.NewInt(int64(mant)) -+ -+ // applying 5^M -+ if exp > 0 { -+ temp = temp.SetInt64(int64(exp)) -+ temp = temp.Exp(fiveInt, temp, nil) -+ } else if exp < 0 { -+ temp = temp.SetInt64(-int64(exp)) -+ temp = temp.Exp(fiveInt, temp, nil) -+ dMant = dMant.Mul(dMant, temp) -+ temp = temp.SetUint64(1) -+ } -+ -+ // applying 2^(M+N) -+ if exp2 > 0 { -+ dMant = dMant.Lsh(dMant, uint(exp2)) -+ } else if exp2 < 0 { -+ temp = temp.Lsh(temp, uint(-exp2)) -+ } -+ -+ // rounding and downscaling -+ if exp > 0 || exp2 < 0 { -+ halfDown := new(big.Int).Rsh(temp, 1) -+ dMant = dMant.Add(dMant, halfDown) -+ dMant = dMant.Quo(dMant, temp) -+ } -+ -+ if sign == 1 { -+ dMant = dMant.Neg(dMant) -+ } -+ -+ return Decimal{ -+ value: dMant, -+ exp: exp, -+ } -+} -+ -+// rescale returns a rescaled version of the decimal. Returned -+// decimal may be less precise if the given exponent is bigger -+// than the initial exponent of the Decimal. 
-+// NOTE: this will truncate, NOT round -+// -+// Example: -+// -+// d := New(12345, -4) -+// d2 := d.rescale(-1) -+// d3 := d2.rescale(-4) -+// println(d1) -+// println(d2) -+// println(d3) -+// -+// Output: -+// -+// 1.2345 -+// 1.2 -+// 1.2000 -+// -+func (d Decimal) rescale(exp int32) Decimal { -+ d.ensureInitialized() -+ -+ if d.exp == exp { -+ return Decimal{ -+ new(big.Int).Set(d.value), -+ d.exp, -+ } -+ } -+ -+ // NOTE(vadim): must convert exps to float64 before - to prevent overflow -+ diff := math.Abs(float64(exp) - float64(d.exp)) -+ value := new(big.Int).Set(d.value) -+ -+ expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil) -+ if exp > d.exp { -+ value = value.Quo(value, expScale) -+ } else if exp < d.exp { -+ value = value.Mul(value, expScale) -+ } -+ -+ return Decimal{ -+ value: value, -+ exp: exp, -+ } -+} -+ -+// Abs returns the absolute value of the decimal. -+func (d Decimal) Abs() Decimal { -+ d.ensureInitialized() -+ d2Value := new(big.Int).Abs(d.value) -+ return Decimal{ -+ value: d2Value, -+ exp: d.exp, -+ } -+} -+ -+// Add returns d + d2. -+func (d Decimal) Add(d2 Decimal) Decimal { -+ rd, rd2 := RescalePair(d, d2) -+ -+ d3Value := new(big.Int).Add(rd.value, rd2.value) -+ return Decimal{ -+ value: d3Value, -+ exp: rd.exp, -+ } -+} -+ -+// Sub returns d - d2. -+func (d Decimal) Sub(d2 Decimal) Decimal { -+ rd, rd2 := RescalePair(d, d2) -+ -+ d3Value := new(big.Int).Sub(rd.value, rd2.value) -+ return Decimal{ -+ value: d3Value, -+ exp: rd.exp, -+ } -+} -+ -+// Neg returns -d. -+func (d Decimal) Neg() Decimal { -+ d.ensureInitialized() -+ val := new(big.Int).Neg(d.value) -+ return Decimal{ -+ value: val, -+ exp: d.exp, -+ } -+} -+ -+// Mul returns d * d2. -+func (d Decimal) Mul(d2 Decimal) Decimal { -+ d.ensureInitialized() -+ d2.ensureInitialized() -+ -+ expInt64 := int64(d.exp) + int64(d2.exp) -+ if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 { -+ // NOTE(vadim): better to panic than give incorrect results, as -+ // Decimals are usually used for money -+ panic(fmt.Sprintf(""exponent %v overflows an int32!"", expInt64)) -+ } -+ -+ d3Value := new(big.Int).Mul(d.value, d2.value) -+ return Decimal{ -+ value: d3Value, -+ exp: int32(expInt64), -+ } -+} -+ -+// Shift shifts the decimal in base 10. -+// It shifts left when shift is positive and right if shift is negative. -+// In simpler terms, the given value for shift is added to the exponent -+// of the decimal. -+func (d Decimal) Shift(shift int32) Decimal { -+ d.ensureInitialized() -+ return Decimal{ -+ value: new(big.Int).Set(d.value), -+ exp: d.exp + shift, -+ } -+} -+ -+// Div returns d / d2. If it doesn't divide exactly, the result will have -+// DivisionPrecision digits after the decimal point. -+func (d Decimal) Div(d2 Decimal) Decimal { -+ return d.DivRound(d2, int32(DivisionPrecision)) -+} -+ -+// QuoRem does divsion with remainder -+// d.QuoRem(d2,precision) returns quotient q and remainder r such that -+// d = d2 * q + r, q an integer multiple of 10^(-precision) -+// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 -+// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 -+// Note that precision<0 is allowed as input. 
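// Illustrative usage sketch (not part of the vendored source above); the variable
// names and values are invented. It exercises the constructors and the immutable
// arithmetic methods defined in this file.
package main

import (
	""fmt""

	""github.com/shopspring/decimal""
)

func main() {
	price := decimal.New(1250, -2)         // 12.50, since number = value * 10 ^ exp
	qty, err := decimal.NewFromString(""3"") // parsing can fail, so check err
	if err != nil {
		panic(err)
	}
	total := price.Mul(qty)   // each operation returns a new Decimal; total is 37.5
	perItem := total.Div(qty) // 12.5; the division is exact, so DivisionPrecision adds no padding
	fmt.Println(total.String(), perItem.String()) // 37.5 12.5
}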
-+func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { -+ d.ensureInitialized() -+ d2.ensureInitialized() -+ if d2.value.Sign() == 0 { -+ panic(""decimal division by 0"") -+ } -+ scale := -precision -+ e := int64(d.exp - d2.exp - scale) -+ if e > math.MaxInt32 || e < math.MinInt32 { -+ panic(""overflow in decimal QuoRem"") -+ } -+ var aa, bb, expo big.Int -+ var scalerest int32 -+ // d = a 10^ea -+ // d2 = b 10^eb -+ if e < 0 { -+ aa = *d.value -+ expo.SetInt64(-e) -+ bb.Exp(tenInt, &expo, nil) -+ bb.Mul(d2.value, &bb) -+ scalerest = d.exp -+ // now aa = a -+ // bb = b 10^(scale + eb - ea) -+ } else { -+ expo.SetInt64(e) -+ aa.Exp(tenInt, &expo, nil) -+ aa.Mul(d.value, &aa) -+ bb = *d2.value -+ scalerest = scale + d2.exp -+ // now aa = a ^ (ea - eb - scale) -+ // bb = b -+ } -+ var q, r big.Int -+ q.QuoRem(&aa, &bb, &r) -+ dq := Decimal{value: &q, exp: scale} -+ dr := Decimal{value: &r, exp: scalerest} -+ return dq, dr -+} -+ -+// DivRound divides and rounds to a given precision -+// i.e. to an integer multiple of 10^(-precision) -+// for a positive quotient digit 5 is rounded up, away from 0 -+// if the quotient is negative then digit 5 is rounded down, away from 0 -+// Note that precision<0 is allowed as input. -+func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { -+ // QuoRem already checks initialization -+ q, r := d.QuoRem(d2, precision) -+ // the actual rounding decision is based on comparing r*10^precision and d2/2 -+ // instead compare 2 r 10 ^precision and d2 -+ var rv2 big.Int -+ rv2.Abs(r.value) -+ rv2.Lsh(&rv2, 1) -+ // now rv2 = abs(r.value) * 2 -+ r2 := Decimal{value: &rv2, exp: r.exp + precision} -+ // r2 is now 2 * r * 10 ^ precision -+ var c = r2.Cmp(d2.Abs()) -+ -+ if c < 0 { -+ return q -+ } -+ -+ if d.value.Sign()*d2.value.Sign() < 0 { -+ return q.Sub(New(1, -precision)) -+ } -+ -+ return q.Add(New(1, -precision)) -+} -+ -+// Mod returns d % d2. -+func (d Decimal) Mod(d2 Decimal) Decimal { -+ quo := d.Div(d2).Truncate(0) -+ return d.Sub(d2.Mul(quo)) -+} -+ -+// Pow returns d to the power d2 -+func (d Decimal) Pow(d2 Decimal) Decimal { -+ var temp Decimal -+ if d2.IntPart() == 0 { -+ return NewFromFloat(1) -+ } -+ temp = d.Pow(d2.Div(NewFromFloat(2))) -+ if d2.IntPart()%2 == 0 { -+ return temp.Mul(temp) -+ } -+ if d2.IntPart() > 0 { -+ return temp.Mul(temp).Mul(d) -+ } -+ return temp.Mul(temp).Div(d) -+} -+ -+// Cmp compares the numbers represented by d and d2 and returns: -+// -+// -1 if d < d2 -+// 0 if d == d2 -+// +1 if d > d2 -+// -+func (d Decimal) Cmp(d2 Decimal) int { -+ d.ensureInitialized() -+ d2.ensureInitialized() -+ -+ if d.exp == d2.exp { -+ return d.value.Cmp(d2.value) -+ } -+ -+ rd, rd2 := RescalePair(d, d2) -+ -+ return rd.value.Cmp(rd2.value) -+} -+ -+// Equal returns whether the numbers represented by d and d2 are equal. -+func (d Decimal) Equal(d2 Decimal) bool { -+ return d.Cmp(d2) == 0 -+} -+ -+// Equals is deprecated, please use Equal method instead -+func (d Decimal) Equals(d2 Decimal) bool { -+ return d.Equal(d2) -+} -+ -+// GreaterThan (GT) returns true when d is greater than d2. -+func (d Decimal) GreaterThan(d2 Decimal) bool { -+ return d.Cmp(d2) == 1 -+} -+ -+// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2. -+func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool { -+ cmp := d.Cmp(d2) -+ return cmp == 1 || cmp == 0 -+} -+ -+// LessThan (LT) returns true when d is less than d2. 
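// Illustrative sketch (not from the vendored source above) of the QuoRem and
// DivRound semantics documented in this file; the operands are invented.
package main

import (
	""fmt""

	""github.com/shopspring/decimal""
)

func main() {
	a := decimal.New(10, 0)
	b := decimal.New(3, 0)

	// q is an integer multiple of 10^(-1) and a = b*q + r, so q = 3.3 and r = 0.1.
	q, r := a.QuoRem(b, 1)
	fmt.Println(q.String(), r.String()) // 3.3 0.1

	// DivRound rounds the quotient to the requested precision: 10/3 to 2 places.
	fmt.Println(a.DivRound(b, 2).String()) // 3.33
}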
-+func (d Decimal) LessThan(d2 Decimal) bool { -+ return d.Cmp(d2) == -1 -+} -+ -+// LessThanOrEqual (LTE) returns true when d is less than or equal to d2. -+func (d Decimal) LessThanOrEqual(d2 Decimal) bool { -+ cmp := d.Cmp(d2) -+ return cmp == -1 || cmp == 0 -+} -+ -+// Sign returns: -+// -+// -1 if d < 0 -+// 0 if d == 0 -+// +1 if d > 0 -+// -+func (d Decimal) Sign() int { -+ if d.value == nil { -+ return 0 -+ } -+ return d.value.Sign() -+} -+ -+// IsPositive return -+// -+// true if d > 0 -+// false if d == 0 -+// false if d < 0 -+func (d Decimal) IsPositive() bool { -+ return d.Sign() == 1 -+} -+ -+// IsNegative return -+// -+// true if d < 0 -+// false if d == 0 -+// false if d > 0 -+func (d Decimal) IsNegative() bool { -+ return d.Sign() == -1 -+} -+ -+// IsZero return -+// -+// true if d == 0 -+// false if d > 0 -+// false if d < 0 -+func (d Decimal) IsZero() bool { -+ return d.Sign() == 0 -+} -+ -+// Exponent returns the exponent, or scale component of the decimal. -+func (d Decimal) Exponent() int32 { -+ return d.exp -+} -+ -+// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent() -+func (d Decimal) Coefficient() *big.Int { -+ d.ensureInitialized() -+ // we copy the coefficient so that mutating the result does not mutate the -+ // Decimal. -+ return big.NewInt(0).Set(d.value) -+} -+ -+// IntPart returns the integer component of the decimal. -+func (d Decimal) IntPart() int64 { -+ scaledD := d.rescale(0) -+ return scaledD.value.Int64() -+} -+ -+// BigInt returns integer component of the decimal as a BigInt. -+func (d Decimal) BigInt() *big.Int { -+ scaledD := d.rescale(0) -+ i := &big.Int{} -+ i.SetString(scaledD.String(), 10) -+ return i -+} -+ -+// BigFloat returns decimal as BigFloat. -+// Be aware that casting decimal to BigFloat might cause a loss of precision. -+func (d Decimal) BigFloat() *big.Float { -+ f := &big.Float{} -+ f.SetString(d.String()) -+ return f -+} -+ -+// Rat returns a rational number representation of the decimal. -+func (d Decimal) Rat() *big.Rat { -+ d.ensureInitialized() -+ if d.exp <= 0 { -+ // NOTE(vadim): must negate after casting to prevent int32 overflow -+ denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil) -+ return new(big.Rat).SetFrac(d.value, denom) -+ } -+ -+ mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil) -+ num := new(big.Int).Mul(d.value, mul) -+ return new(big.Rat).SetFrac(num, oneInt) -+} -+ -+// Float64 returns the nearest float64 value for d and a bool indicating -+// whether f represents d exactly. -+// For more details, see the documentation for big.Rat.Float64 -+func (d Decimal) Float64() (f float64, exact bool) { -+ return d.Rat().Float64() -+} -+ -+// String returns the string representation of the decimal -+// with the fixed point. -+// -+// Example: -+// -+// d := New(-12345, -3) -+// println(d.String()) -+// -+// Output: -+// -+// -12.345 -+// -+func (d Decimal) String() string { -+ return d.string(true) -+} -+ -+// StringFixed returns a rounded fixed-point string with places digits after -+// the decimal point. 
-+// -+// Example: -+// -+// NewFromFloat(0).StringFixed(2) // output: ""0.00"" -+// NewFromFloat(0).StringFixed(0) // output: ""0"" -+// NewFromFloat(5.45).StringFixed(0) // output: ""5"" -+// NewFromFloat(5.45).StringFixed(1) // output: ""5.5"" -+// NewFromFloat(5.45).StringFixed(2) // output: ""5.45"" -+// NewFromFloat(5.45).StringFixed(3) // output: ""5.450"" -+// NewFromFloat(545).StringFixed(-1) // output: ""550"" -+// -+func (d Decimal) StringFixed(places int32) string { -+ rounded := d.Round(places) -+ return rounded.string(false) -+} -+ -+// StringFixedBank returns a banker rounded fixed-point string with places digits -+// after the decimal point. -+// -+// Example: -+// -+// NewFromFloat(0).StringFixedBank(2) // output: ""0.00"" -+// NewFromFloat(0).StringFixedBank(0) // output: ""0"" -+// NewFromFloat(5.45).StringFixedBank(0) // output: ""5"" -+// NewFromFloat(5.45).StringFixedBank(1) // output: ""5.4"" -+// NewFromFloat(5.45).StringFixedBank(2) // output: ""5.45"" -+// NewFromFloat(5.45).StringFixedBank(3) // output: ""5.450"" -+// NewFromFloat(545).StringFixedBank(-1) // output: ""540"" -+// -+func (d Decimal) StringFixedBank(places int32) string { -+ rounded := d.RoundBank(places) -+ return rounded.string(false) -+} -+ -+// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For -+// more details see the documentation at function RoundCash. -+func (d Decimal) StringFixedCash(interval uint8) string { -+ rounded := d.RoundCash(interval) -+ return rounded.string(false) -+} -+ -+// Round rounds the decimal to places decimal places. -+// If places < 0, it will round the integer part to the nearest 10^(-places). -+// -+// Example: -+// -+// NewFromFloat(5.45).Round(1).String() // output: ""5.5"" -+// NewFromFloat(545).Round(-1).String() // output: ""550"" -+// -+func (d Decimal) Round(places int32) Decimal { -+ // truncate to places + 1 -+ ret := d.rescale(-places - 1) -+ -+ // add sign(d) * 0.5 -+ if ret.value.Sign() < 0 { -+ ret.value.Sub(ret.value, fiveInt) -+ } else { -+ ret.value.Add(ret.value, fiveInt) -+ } -+ -+ // floor for positive numbers, ceil for negative numbers -+ _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int)) -+ ret.exp++ -+ if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 { -+ ret.value.Add(ret.value, oneInt) -+ } -+ -+ return ret -+} -+ -+// RoundBank rounds the decimal to places decimal places. -+// If the final digit to round is equidistant from the nearest two integers the -+// rounded value is taken as the even number -+// -+// If places < 0, it will round the integer part to the nearest 10^(-places). -+// -+// Examples: -+// -+// NewFromFloat(5.45).Round(1).String() // output: ""5.4"" -+// NewFromFloat(545).Round(-1).String() // output: ""540"" -+// NewFromFloat(5.46).Round(1).String() // output: ""5.5"" -+// NewFromFloat(546).Round(-1).String() // output: ""550"" -+// NewFromFloat(5.55).Round(1).String() // output: ""5.6"" -+// NewFromFloat(555).Round(-1).String() // output: ""560"" -+// -+func (d Decimal) RoundBank(places int32) Decimal { -+ -+ round := d.Round(places) -+ remainder := d.Sub(round).Abs() -+ -+ half := New(5, -places-1) -+ if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 { -+ if round.value.Sign() < 0 { -+ round.value.Add(round.value, oneInt) -+ } else { -+ round.value.Sub(round.value, oneInt) -+ } -+ } -+ -+ return round -+} -+ -+// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific -+// interval. 
The amount payable for a cash transaction is rounded to the nearest -+// multiple of the minimum currency unit available. The following intervals are -+// available: 5, 10, 25, 50 and 100; any other number throws a panic. -+// 5: 5 cent rounding 3.43 => 3.45 -+// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) -+// 25: 25 cent rounding 3.41 => 3.50 -+// 50: 50 cent rounding 3.75 => 4.00 -+// 100: 100 cent rounding 3.50 => 4.00 -+// For more details: https://en.wikipedia.org/wiki/Cash_rounding -+func (d Decimal) RoundCash(interval uint8) Decimal { -+ var iVal *big.Int -+ switch interval { -+ case 5: -+ iVal = twentyInt -+ case 10: -+ iVal = tenInt -+ case 25: -+ iVal = fourInt -+ case 50: -+ iVal = twoInt -+ case 100: -+ iVal = oneInt -+ default: -+ panic(fmt.Sprintf(""Decimal does not support this Cash rounding interval `%d`. Supported: 5, 10, 25, 50, 100"", interval)) -+ } -+ dVal := Decimal{ -+ value: iVal, -+ } -+ -+ // TODO: optimize those calculations to reduce the high allocations (~29 allocs). -+ return d.Mul(dVal).Round(0).Div(dVal).Truncate(2) -+} -+ -+// Floor returns the nearest integer value less than or equal to d. -+func (d Decimal) Floor() Decimal { -+ d.ensureInitialized() -+ -+ if d.exp >= 0 { -+ return d -+ } -+ -+ exp := big.NewInt(10) -+ -+ // NOTE(vadim): must negate after casting to prevent int32 overflow -+ exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) -+ -+ z := new(big.Int).Div(d.value, exp) -+ return Decimal{value: z, exp: 0} -+} -+ -+// Ceil returns the nearest integer value greater than or equal to d. -+func (d Decimal) Ceil() Decimal { -+ d.ensureInitialized() -+ -+ if d.exp >= 0 { -+ return d -+ } -+ -+ exp := big.NewInt(10) -+ -+ // NOTE(vadim): must negate after casting to prevent int32 overflow -+ exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) -+ -+ z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) -+ if m.Cmp(zeroInt) != 0 { -+ z.Add(z, oneInt) -+ } -+ return Decimal{value: z, exp: 0} -+} -+ -+// Truncate truncates off digits from the number, without rounding. -+// -+// NOTE: precision is the last digit that will not be truncated (must be >= 0). -+// -+// Example: -+// -+// decimal.NewFromString(""123.456"").Truncate(2).String() // ""123.45"" -+// -+func (d Decimal) Truncate(precision int32) Decimal { -+ d.ensureInitialized() -+ if precision >= 0 && -precision > d.exp { -+ return d.rescale(-precision) -+ } -+ return d -+} -+ -+// UnmarshalJSON implements the json.Unmarshaler interface. -+func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { -+ if string(decimalBytes) == ""null"" { -+ return nil -+ } -+ -+ str, err := unquoteIfQuoted(decimalBytes) -+ if err != nil { -+ return fmt.Errorf(""error decoding string '%s': %s"", decimalBytes, err) -+ } -+ -+ decimal, err := NewFromString(str) -+ *d = decimal -+ if err != nil { -+ return fmt.Errorf(""error decoding string '%s': %s"", str, err) -+ } -+ return nil -+} -+ -+// MarshalJSON implements the json.Marshaler interface. -+func (d Decimal) MarshalJSON() ([]byte, error) { -+ var str string -+ if MarshalJSONWithoutQuotes { -+ str = d.String() -+ } else { -+ str = ""\"""" + d.String() + ""\"""" -+ } -+ return []byte(str), nil -+} -+ -+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
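// Illustrative sketch (not from the vendored source above) of the JSON behaviour
// implemented by MarshalJSON together with the MarshalJSONWithoutQuotes switch;
// the Invoice struct and its values are invented.
package main

import (
	""encoding/json""
	""fmt""

	""github.com/shopspring/decimal""
)

type Invoice struct {
	Amount decimal.Decimal `json:""amount""`
}

func main() {
	inv := Invoice{Amount: decimal.New(1999, -2)} // 19.99
	out, _ := json.Marshal(inv)
	fmt.Println(string(out)) // {""amount"":""19.99""} - a quoted string by default

	// Number output is opt-in because some JSON decoders parse numbers as float64
	// and can silently lose precision.
	decimal.MarshalJSONWithoutQuotes = true
	out, _ = json.Marshal(inv)
	fmt.Println(string(out)) // {""amount"":19.99}
}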
As a string representation -+// is already used when encoding to text, this method stores that string as []byte -+func (d *Decimal) UnmarshalBinary(data []byte) error { -+ // Extract the exponent -+ d.exp = int32(binary.BigEndian.Uint32(data[:4])) -+ -+ // Extract the value -+ d.value = new(big.Int) -+ return d.value.GobDecode(data[4:]) -+} -+ -+// MarshalBinary implements the encoding.BinaryMarshaler interface. -+func (d Decimal) MarshalBinary() (data []byte, err error) { -+ // Write the exponent first since it's a fixed size -+ v1 := make([]byte, 4) -+ binary.BigEndian.PutUint32(v1, uint32(d.exp)) -+ -+ // Add the value -+ var v2 []byte -+ if v2, err = d.value.GobEncode(); err != nil { -+ return -+ } -+ -+ // Return the byte array -+ data = append(v1, v2...) -+ return -+} -+ -+// Scan implements the sql.Scanner interface for database deserialization. -+func (d *Decimal) Scan(value interface{}) error { -+ // first try to see if the data is stored in database as a Numeric datatype -+ switch v := value.(type) { -+ -+ case float32: -+ *d = NewFromFloat(float64(v)) -+ return nil -+ -+ case float64: -+ // numeric in sqlite3 sends us float64 -+ *d = NewFromFloat(v) -+ return nil -+ -+ case int64: -+ // at least in sqlite3 when the value is 0 in db, the data is sent -+ // to us as an int64 instead of a float64 ... -+ *d = New(v, 0) -+ return nil -+ -+ default: -+ // default is trying to interpret value stored as string -+ str, err := unquoteIfQuoted(v) -+ if err != nil { -+ return err -+ } -+ *d, err = NewFromString(str) -+ return err -+ } -+} -+ -+// Value implements the driver.Valuer interface for database serialization. -+func (d Decimal) Value() (driver.Value, error) { -+ return d.String(), nil -+} -+ -+// UnmarshalText implements the encoding.TextUnmarshaler interface for XML -+// deserialization. -+func (d *Decimal) UnmarshalText(text []byte) error { -+ str := string(text) -+ -+ dec, err := NewFromString(str) -+ *d = dec -+ if err != nil { -+ return fmt.Errorf(""error decoding string '%s': %s"", str, err) -+ } -+ -+ return nil -+} -+ -+// MarshalText implements the encoding.TextMarshaler interface for XML -+// serialization. -+func (d Decimal) MarshalText() (text []byte, err error) { -+ return []byte(d.String()), nil -+} -+ -+// GobEncode implements the gob.GobEncoder interface for gob serialization. -+func (d Decimal) GobEncode() ([]byte, error) { -+ return d.MarshalBinary() -+} -+ -+// GobDecode implements the gob.GobDecoder interface for gob serialization. -+func (d *Decimal) GobDecode(data []byte) error { -+ return d.UnmarshalBinary(data) -+} -+ -+// StringScaled first scales the decimal then calls .String() on it. -+// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. -+func (d Decimal) StringScaled(exp int32) string { -+ return d.rescale(exp).String() -+} -+ -+func (d Decimal) string(trimTrailingZeros bool) string { -+ if d.exp >= 0 { -+ return d.rescale(0).value.String() -+ } -+ -+ abs := new(big.Int).Abs(d.value) -+ str := abs.String() -+ -+ var intPart, fractionalPart string -+ -+ // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN -+ // and you are on a 32-bit machine. Won't fix this super-edge case. 
-+ dExpInt := int(d.exp) -+ if len(str) > -dExpInt { -+ intPart = str[:len(str)+dExpInt] -+ fractionalPart = str[len(str)+dExpInt:] -+ } else { -+ intPart = ""0"" -+ -+ num0s := -dExpInt - len(str) -+ fractionalPart = strings.Repeat(""0"", num0s) + str -+ } -+ -+ if trimTrailingZeros { -+ i := len(fractionalPart) - 1 -+ for ; i >= 0; i-- { -+ if fractionalPart[i] != '0' { -+ break -+ } -+ } -+ fractionalPart = fractionalPart[:i+1] -+ } -+ -+ number := intPart -+ if len(fractionalPart) > 0 { -+ number += ""."" + fractionalPart -+ } -+ -+ if d.value.Sign() < 0 { -+ return ""-"" + number -+ } -+ -+ return number -+} -+ -+func (d *Decimal) ensureInitialized() { -+ if d.value == nil { -+ d.value = new(big.Int) -+ } -+} -+ -+// Min returns the smallest Decimal that was passed in the arguments. -+// -+// To call this function with an array, you must do: -+// -+// Min(arr[0], arr[1:]...) -+// -+// This makes it harder to accidentally call Min with 0 arguments. -+func Min(first Decimal, rest ...Decimal) Decimal { -+ ans := first -+ for _, item := range rest { -+ if item.Cmp(ans) < 0 { -+ ans = item -+ } -+ } -+ return ans -+} -+ -+// Max returns the largest Decimal that was passed in the arguments. -+// -+// To call this function with an array, you must do: -+// -+// Max(arr[0], arr[1:]...) -+// -+// This makes it harder to accidentally call Max with 0 arguments. -+func Max(first Decimal, rest ...Decimal) Decimal { -+ ans := first -+ for _, item := range rest { -+ if item.Cmp(ans) > 0 { -+ ans = item -+ } -+ } -+ return ans -+} -+ -+// Sum returns the combined total of the provided first and rest Decimals -+func Sum(first Decimal, rest ...Decimal) Decimal { -+ total := first -+ for _, item := range rest { -+ total = total.Add(item) -+ } -+ -+ return total -+} -+ -+// Avg returns the average value of the provided first and rest Decimals -+func Avg(first Decimal, rest ...Decimal) Decimal { -+ count := New(int64(len(rest)+1), 0) -+ sum := Sum(first, rest...) -+ return sum.Div(count) -+} -+ -+// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) -+func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { -+ d1.ensureInitialized() -+ d2.ensureInitialized() -+ -+ if d1.exp == d2.exp { -+ return d1, d2 -+ } -+ -+ baseScale := min(d1.exp, d2.exp) -+ if baseScale != d1.exp { -+ return d1.rescale(baseScale), d2 -+ } -+ return d1, d2.rescale(baseScale) -+} -+ -+func min(x, y int32) int32 { -+ if x >= y { -+ return y -+ } -+ return x -+} -+ -+func unquoteIfQuoted(value interface{}) (string, error) { -+ var bytes []byte -+ -+ switch v := value.(type) { -+ case string: -+ bytes = []byte(v) -+ case []byte: -+ bytes = v -+ default: -+ return """", fmt.Errorf(""could not convert value '%+v' to byte array of type '%T'"", -+ value, value) -+ } -+ -+ // If the amount is quoted, strip the quotes -+ if len(bytes) > 2 && bytes[0] == '""' && bytes[len(bytes)-1] == '""' { -+ bytes = bytes[1 : len(bytes)-1] -+ } -+ return string(bytes), nil -+} -+ -+// NullDecimal represents a nullable decimal with compatibility for -+// scanning null values from the database. -+type NullDecimal struct { -+ Decimal Decimal -+ Valid bool -+} -+ -+// Scan implements the sql.Scanner interface for database deserialization. -+func (d *NullDecimal) Scan(value interface{}) error { -+ if value == nil { -+ d.Valid = false -+ return nil -+ } -+ d.Valid = true -+ return d.Decimal.Scan(value) -+} -+ -+// Value implements the driver.Valuer interface for database serialization. 
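// Illustrative sketch (not from the vendored source above) of the variadic
// Min/Max/Sum/Avg helpers defined in this file; the slice contents are invented.
package main

import (
	""fmt""

	""github.com/shopspring/decimal""
)

func main() {
	prices := []decimal.Decimal{
		decimal.New(1, 0),
		decimal.New(2, 0),
		decimal.New(3, 0),
	}
	// The (first, rest...) signature makes it impossible to call these with no arguments.
	fmt.Println(decimal.Sum(prices[0], prices[1:]...).String()) // 6
	fmt.Println(decimal.Avg(prices[0], prices[1:]...).String()) // 2
	fmt.Println(decimal.Min(prices[0], prices[1:]...).String()) // 1
}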
-+func (d NullDecimal) Value() (driver.Value, error) { -+ if !d.Valid { -+ return nil, nil -+ } -+ return d.Decimal.Value() -+} -+ -+// UnmarshalJSON implements the json.Unmarshaler interface. -+func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error { -+ if string(decimalBytes) == ""null"" { -+ d.Valid = false -+ return nil -+ } -+ d.Valid = true -+ return d.Decimal.UnmarshalJSON(decimalBytes) -+} -+ -+// MarshalJSON implements the json.Marshaler interface. -+func (d NullDecimal) MarshalJSON() ([]byte, error) { -+ if !d.Valid { -+ return []byte(""null""), nil -+ } -+ return d.Decimal.MarshalJSON() -+} -+ -+// Trig functions -+ -+// Atan returns the arctangent, in radians, of x. -+func (d Decimal) Atan() Decimal { -+ if d.Equal(NewFromFloat(0.0)) { -+ return d -+ } -+ if d.GreaterThan(NewFromFloat(0.0)) { -+ return d.satan() -+ } -+ return d.Neg().satan().Neg() -+} -+ -+func (d Decimal) xatan() Decimal { -+ P0 := NewFromFloat(-8.750608600031904122785e-01) -+ P1 := NewFromFloat(-1.615753718733365076637e+01) -+ P2 := NewFromFloat(-7.500855792314704667340e+01) -+ P3 := NewFromFloat(-1.228866684490136173410e+02) -+ P4 := NewFromFloat(-6.485021904942025371773e+01) -+ Q0 := NewFromFloat(2.485846490142306297962e+01) -+ Q1 := NewFromFloat(1.650270098316988542046e+02) -+ Q2 := NewFromFloat(4.328810604912902668951e+02) -+ Q3 := NewFromFloat(4.853903996359136964868e+02) -+ Q4 := NewFromFloat(1.945506571482613964425e+02) -+ z := d.Mul(d) -+ b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z) -+ b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4) -+ z = b1.Div(b2) -+ z = d.Mul(z).Add(d) -+ return z -+} -+ -+// satan reduces its argument (known to be positive) -+// to the range [0, 0.66] and calls xatan. -+func (d Decimal) satan() Decimal { -+ Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits -+ Tan3pio8 := NewFromFloat(2.41421356237309504880) // tan(3*pi/8) -+ pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459) -+ -+ if d.LessThanOrEqual(NewFromFloat(0.66)) { -+ return d.xatan() -+ } -+ if d.GreaterThan(Tan3pio8) { -+ return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits) -+ } -+ return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits)) -+} -+ -+// sin coefficients -+var _sin = [...]Decimal{ -+ NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd -+ NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d -+ NewFromFloat(2.75573136213857245213e-6), // 0x3ec71de3567d48a1 -+ NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03 -+ NewFromFloat(8.33333333332211858878e-3), // 0x3f8111111110f7d0 -+ NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548 -+} -+ -+// Sin returns the sine of the radian argument x. 
-+func (d Decimal) Sin() Decimal { -+ PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts -+ PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, -+ PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, -+ M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi -+ -+ if d.Equal(NewFromFloat(0.0)) { -+ return d -+ } -+ // make argument positive but save the sign -+ sign := false -+ if d.LessThan(NewFromFloat(0.0)) { -+ d = d.Neg() -+ sign = true -+ } -+ -+ j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle -+ y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float -+ -+ // map zeros to origin -+ if j&1 == 1 { -+ j++ -+ y = y.Add(NewFromFloat(1.0)) -+ } -+ j &= 7 // octant modulo 2Pi radians (360 degrees) -+ // reflect in x axis -+ if j > 3 { -+ sign = !sign -+ j -= 4 -+ } -+ z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic -+ zz := z.Mul(z) -+ -+ if j == 1 || j == 2 { -+ w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) -+ y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) -+ } else { -+ y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) -+ } -+ if sign { -+ y = y.Neg() -+ } -+ return y -+} -+ -+// cos coefficients -+var _cos = [...]Decimal{ -+ NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b -+ NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05 -+ NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6 -+ NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5 -+ NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91 -+ NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b -+} -+ -+// Cos returns the cosine of the radian argument x. 
-+func (d Decimal) Cos() Decimal { -+ -+ PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts -+ PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, -+ PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, -+ M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi -+ -+ // make argument positive -+ sign := false -+ if d.LessThan(NewFromFloat(0.0)) { -+ d = d.Neg() -+ } -+ -+ j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle -+ y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float -+ -+ // map zeros to origin -+ if j&1 == 1 { -+ j++ -+ y = y.Add(NewFromFloat(1.0)) -+ } -+ j &= 7 // octant modulo 2Pi radians (360 degrees) -+ // reflect in x axis -+ if j > 3 { -+ sign = !sign -+ j -= 4 -+ } -+ if j > 1 { -+ sign = !sign -+ } -+ -+ z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic -+ zz := z.Mul(z) -+ -+ if j == 1 || j == 2 { -+ y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) -+ } else { -+ w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) -+ y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) -+ } -+ if sign { -+ y = y.Neg() -+ } -+ return y -+} -+ -+var _tanP = [...]Decimal{ -+ NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 -+ NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd -+ NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 -+} -+var _tanQ = [...]Decimal{ -+ NewFromFloat(1.00000000000000000000e+0), -+ NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 -+ NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 -+ NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef -+ NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 -+} -+ -+// Tan returns the tangent of the radian argument x. 
-+func (d Decimal) Tan() Decimal { -+ -+ PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts -+ PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, -+ PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, -+ M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi -+ -+ if d.Equal(NewFromFloat(0.0)) { -+ return d -+ } -+ -+ // make argument positive but save the sign -+ sign := false -+ if d.LessThan(NewFromFloat(0.0)) { -+ d = d.Neg() -+ sign = true -+ } -+ -+ j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle -+ y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float -+ -+ // map zeros to origin -+ if j&1 == 1 { -+ j++ -+ y = y.Add(NewFromFloat(1.0)) -+ } -+ -+ z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic -+ zz := z.Mul(z) -+ -+ if zz.GreaterThan(NewFromFloat(1e-14)) { -+ w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) -+ x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) -+ y = z.Add(z.Mul(w.Div(x))) -+ } else { -+ y = z -+ } -+ if j&2 == 2 { -+ y = NewFromFloat(-1.0).Div(y) -+ } -+ if sign { -+ y = y.Neg() -+ } -+ return y -+} -diff --git a/vendor/github.com/shopspring/decimal/go.mod b/vendor/github.com/shopspring/decimal/go.mod -new file mode 100644 -index 0000000000000..ae1b7aa3c7058 ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/go.mod -@@ -0,0 +1,3 @@ -+module github.com/shopspring/decimal -+ -+go 1.13 -diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go -new file mode 100644 -index 0000000000000..8008f55cb9801 ---- /dev/null -+++ b/vendor/github.com/shopspring/decimal/rounding.go -@@ -0,0 +1,119 @@ -+// Copyright 2009 The Go Authors. All rights reserved. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. -+ -+// Multiprecision decimal numbers. -+// For floating-point formatting only; not general purpose. -+// Only operations are assign and (binary) left/right shift. -+// Can do binary floating point in multiprecision decimal precisely -+// because 2 divides 10; cannot do decimal floating point -+// in multiprecision binary precisely. -+ -+package decimal -+ -+type floatInfo struct { -+ mantbits uint -+ expbits uint -+ bias int -+} -+ -+var float32info = floatInfo{23, 8, -127} -+var float64info = floatInfo{52, 11, -1023} -+ -+// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits -+// that will let the original floating point value be precisely reconstructed. -+func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { -+ // If mantissa is zero, the number is zero; stop now. -+ if mant == 0 { -+ d.nd = 0 -+ return -+ } -+ -+ // Compute upper and lower such that any decimal number -+ // between upper and lower (possibly inclusive) -+ // will round to the original floating point number. -+ -+ // We may see at once that the number is already shortest. -+ // -+ // Suppose d is not denormal, so that 2^exp <= d < 10^dp. -+ // The closest shorter number is at least 10^(dp-nd) away. -+ // The lower/upper bounds computed below are at distance -+ // at most 2^(exp-mantbits). -+ // -+ // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), -+ // or equivalently log2(10)*(dp-nd) > exp-mantbits. 
-+ // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). -+ minexp := flt.bias + 1 // minimum possible exponent -+ if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { -+ // The number is already shortest. -+ return -+ } -+ -+ // d = mant << (exp - mantbits) -+ // Next highest floating point number is mant+1 << exp-mantbits. -+ // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. -+ upper := new(decimal) -+ upper.Assign(mant*2 + 1) -+ upper.Shift(exp - int(flt.mantbits) - 1) -+ -+ // d = mant << (exp - mantbits) -+ // Next lowest floating point number is mant-1 << exp-mantbits, -+ // unless mant-1 drops the significant bit and exp is not the minimum exp, -+ // in which case the next lowest is mant*2-1 << exp-mantbits-1. -+ // Either way, call it mantlo << explo-mantbits. -+ // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. -+ var mantlo uint64 -+ var explo int -+ if mant > 1<. -+// -+// Use of this source code is governed by an MIT-style -+// license that can be found in the LICENSE file. -+ -+// Package cast provides easy and safe casting in Go. -+package cast -+ -+import ""time"" -+ -+// ToBool casts an interface to a bool type. -+func ToBool(i interface{}) bool { -+ v, _ := ToBoolE(i) -+ return v -+} -+ -+// ToTime casts an interface to a time.Time type. -+func ToTime(i interface{}) time.Time { -+ v, _ := ToTimeE(i) -+ return v -+} -+ -+// ToDuration casts an interface to a time.Duration type. -+func ToDuration(i interface{}) time.Duration { -+ v, _ := ToDurationE(i) -+ return v -+} -+ -+// ToFloat64 casts an interface to a float64 type. -+func ToFloat64(i interface{}) float64 { -+ v, _ := ToFloat64E(i) -+ return v -+} -+ -+// ToFloat32 casts an interface to a float32 type. -+func ToFloat32(i interface{}) float32 { -+ v, _ := ToFloat32E(i) -+ return v -+} -+ -+// ToInt64 casts an interface to an int64 type. -+func ToInt64(i interface{}) int64 { -+ v, _ := ToInt64E(i) -+ return v -+} -+ -+// ToInt32 casts an interface to an int32 type. -+func ToInt32(i interface{}) int32 { -+ v, _ := ToInt32E(i) -+ return v -+} -+ -+// ToInt16 casts an interface to an int16 type. -+func ToInt16(i interface{}) int16 { -+ v, _ := ToInt16E(i) -+ return v -+} -+ -+// ToInt8 casts an interface to an int8 type. -+func ToInt8(i interface{}) int8 { -+ v, _ := ToInt8E(i) -+ return v -+} -+ -+// ToInt casts an interface to an int type. -+func ToInt(i interface{}) int { -+ v, _ := ToIntE(i) -+ return v -+} -+ -+// ToUint casts an interface to a uint type. -+func ToUint(i interface{}) uint { -+ v, _ := ToUintE(i) -+ return v -+} -+ -+// ToUint64 casts an interface to a uint64 type. -+func ToUint64(i interface{}) uint64 { -+ v, _ := ToUint64E(i) -+ return v -+} -+ -+// ToUint32 casts an interface to a uint32 type. -+func ToUint32(i interface{}) uint32 { -+ v, _ := ToUint32E(i) -+ return v -+} -+ -+// ToUint16 casts an interface to a uint16 type. -+func ToUint16(i interface{}) uint16 { -+ v, _ := ToUint16E(i) -+ return v -+} -+ -+// ToUint8 casts an interface to a uint8 type. -+func ToUint8(i interface{}) uint8 { -+ v, _ := ToUint8E(i) -+ return v -+} -+ -+// ToString casts an interface to a string type. -+func ToString(i interface{}) string { -+ v, _ := ToStringE(i) -+ return v -+} -+ -+// ToStringMapString casts an interface to a map[string]string type. -+func ToStringMapString(i interface{}) map[string]string { -+ v, _ := ToStringMapStringE(i) -+ return v -+} -+ -+// ToStringMapStringSlice casts an interface to a map[string][]string type. 
-+func ToStringMapStringSlice(i interface{}) map[string][]string { -+ v, _ := ToStringMapStringSliceE(i) -+ return v -+} -+ -+// ToStringMapBool casts an interface to a map[string]bool type. -+func ToStringMapBool(i interface{}) map[string]bool { -+ v, _ := ToStringMapBoolE(i) -+ return v -+} -+ -+// ToStringMapInt casts an interface to a map[string]int type. -+func ToStringMapInt(i interface{}) map[string]int { -+ v, _ := ToStringMapIntE(i) -+ return v -+} -+ -+// ToStringMapInt64 casts an interface to a map[string]int64 type. -+func ToStringMapInt64(i interface{}) map[string]int64 { -+ v, _ := ToStringMapInt64E(i) -+ return v -+} -+ -+// ToStringMap casts an interface to a map[string]interface{} type. -+func ToStringMap(i interface{}) map[string]interface{} { -+ v, _ := ToStringMapE(i) -+ return v -+} -+ -+// ToSlice casts an interface to a []interface{} type. -+func ToSlice(i interface{}) []interface{} { -+ v, _ := ToSliceE(i) -+ return v -+} -+ -+// ToBoolSlice casts an interface to a []bool type. -+func ToBoolSlice(i interface{}) []bool { -+ v, _ := ToBoolSliceE(i) -+ return v -+} -+ -+// ToStringSlice casts an interface to a []string type. -+func ToStringSlice(i interface{}) []string { -+ v, _ := ToStringSliceE(i) -+ return v -+} -+ -+// ToIntSlice casts an interface to a []int type. -+func ToIntSlice(i interface{}) []int { -+ v, _ := ToIntSliceE(i) -+ return v -+} -+ -+// ToDurationSlice casts an interface to a []time.Duration type. -+func ToDurationSlice(i interface{}) []time.Duration { -+ v, _ := ToDurationSliceE(i) -+ return v -+} -diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go -new file mode 100644 -index 0000000000000..70c7291bed9b4 ---- /dev/null -+++ b/vendor/github.com/spf13/cast/caste.go -@@ -0,0 +1,1249 @@ -+// Copyright © 2014 Steve Francia . -+// -+// Use of this source code is governed by an MIT-style -+// license that can be found in the LICENSE file. -+ -+package cast -+ -+import ( -+ ""encoding/json"" -+ ""errors"" -+ ""fmt"" -+ ""html/template"" -+ ""reflect"" -+ ""strconv"" -+ ""strings"" -+ ""time"" -+) -+ -+var errNegativeNotAllowed = errors.New(""unable to cast negative value"") -+ -+// ToTimeE casts an interface to a time.Time type. -+func ToTimeE(i interface{}) (tim time.Time, err error) { -+ i = indirect(i) -+ -+ switch v := i.(type) { -+ case time.Time: -+ return v, nil -+ case string: -+ return StringToDate(v) -+ case int: -+ return time.Unix(int64(v), 0), nil -+ case int64: -+ return time.Unix(v, 0), nil -+ case int32: -+ return time.Unix(int64(v), 0), nil -+ case uint: -+ return time.Unix(int64(v), 0), nil -+ case uint64: -+ return time.Unix(int64(v), 0), nil -+ case uint32: -+ return time.Unix(int64(v), 0), nil -+ default: -+ return time.Time{}, fmt.Errorf(""unable to cast %#v of type %T to Time"", i, i) -+ } -+} -+ -+// ToDurationE casts an interface to a time.Duration type. 
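// Illustrative sketch (not from the vendored source above) contrasting the
// error-swallowing To* helpers with their To*E counterparts; inputs are invented.
package main

import (
	""fmt""

	""github.com/spf13/cast""
)

func main() {
	fmt.Println(cast.ToInt(""42""))       // 42
	fmt.Println(cast.ToBool(1))          // true
	fmt.Println(cast.ToDuration(""90s"")) // 1m30s; a bare numeric string would be read as nanoseconds

	// The E variants surface the conversion error instead of returning the zero value.
	if _, err := cast.ToIntE(""not a number""); err != nil {
		fmt.Println(err)
	}
}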
-+func ToDurationE(i interface{}) (d time.Duration, err error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case time.Duration: -+ return s, nil -+ case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: -+ d = time.Duration(ToInt64(s)) -+ return -+ case float32, float64: -+ d = time.Duration(ToFloat64(s)) -+ return -+ case string: -+ if strings.ContainsAny(s, ""nsuµmh"") { -+ d, err = time.ParseDuration(s) -+ } else { -+ d, err = time.ParseDuration(s + ""ns"") -+ } -+ return -+ default: -+ err = fmt.Errorf(""unable to cast %#v of type %T to Duration"", i, i) -+ return -+ } -+} -+ -+// ToBoolE casts an interface to a bool type. -+func ToBoolE(i interface{}) (bool, error) { -+ i = indirect(i) -+ -+ switch b := i.(type) { -+ case bool: -+ return b, nil -+ case nil: -+ return false, nil -+ case int: -+ if i.(int) != 0 { -+ return true, nil -+ } -+ return false, nil -+ case string: -+ return strconv.ParseBool(i.(string)) -+ default: -+ return false, fmt.Errorf(""unable to cast %#v of type %T to bool"", i, i) -+ } -+} -+ -+// ToFloat64E casts an interface to a float64 type. -+func ToFloat64E(i interface{}) (float64, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case float64: -+ return s, nil -+ case float32: -+ return float64(s), nil -+ case int: -+ return float64(s), nil -+ case int64: -+ return float64(s), nil -+ case int32: -+ return float64(s), nil -+ case int16: -+ return float64(s), nil -+ case int8: -+ return float64(s), nil -+ case uint: -+ return float64(s), nil -+ case uint64: -+ return float64(s), nil -+ case uint32: -+ return float64(s), nil -+ case uint16: -+ return float64(s), nil -+ case uint8: -+ return float64(s), nil -+ case string: -+ v, err := strconv.ParseFloat(s, 64) -+ if err == nil { -+ return v, nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to float64"", i, i) -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to float64"", i, i) -+ } -+} -+ -+// ToFloat32E casts an interface to a float32 type. -+func ToFloat32E(i interface{}) (float32, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case float64: -+ return float32(s), nil -+ case float32: -+ return s, nil -+ case int: -+ return float32(s), nil -+ case int64: -+ return float32(s), nil -+ case int32: -+ return float32(s), nil -+ case int16: -+ return float32(s), nil -+ case int8: -+ return float32(s), nil -+ case uint: -+ return float32(s), nil -+ case uint64: -+ return float32(s), nil -+ case uint32: -+ return float32(s), nil -+ case uint16: -+ return float32(s), nil -+ case uint8: -+ return float32(s), nil -+ case string: -+ v, err := strconv.ParseFloat(s, 32) -+ if err == nil { -+ return float32(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to float32"", i, i) -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to float32"", i, i) -+ } -+} -+ -+// ToInt64E casts an interface to an int64 type. 
-+func ToInt64E(i interface{}) (int64, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case int: -+ return int64(s), nil -+ case int64: -+ return s, nil -+ case int32: -+ return int64(s), nil -+ case int16: -+ return int64(s), nil -+ case int8: -+ return int64(s), nil -+ case uint: -+ return int64(s), nil -+ case uint64: -+ return int64(s), nil -+ case uint32: -+ return int64(s), nil -+ case uint16: -+ return int64(s), nil -+ case uint8: -+ return int64(s), nil -+ case float64: -+ return int64(s), nil -+ case float32: -+ return int64(s), nil -+ case string: -+ v, err := strconv.ParseInt(s, 0, 0) -+ if err == nil { -+ return v, nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int64"", i, i) -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int64"", i, i) -+ } -+} -+ -+// ToInt32E casts an interface to an int32 type. -+func ToInt32E(i interface{}) (int32, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case int: -+ return int32(s), nil -+ case int64: -+ return int32(s), nil -+ case int32: -+ return s, nil -+ case int16: -+ return int32(s), nil -+ case int8: -+ return int32(s), nil -+ case uint: -+ return int32(s), nil -+ case uint64: -+ return int32(s), nil -+ case uint32: -+ return int32(s), nil -+ case uint16: -+ return int32(s), nil -+ case uint8: -+ return int32(s), nil -+ case float64: -+ return int32(s), nil -+ case float32: -+ return int32(s), nil -+ case string: -+ v, err := strconv.ParseInt(s, 0, 0) -+ if err == nil { -+ return int32(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int32"", i, i) -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int32"", i, i) -+ } -+} -+ -+// ToInt16E casts an interface to an int16 type. -+func ToInt16E(i interface{}) (int16, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case int: -+ return int16(s), nil -+ case int64: -+ return int16(s), nil -+ case int32: -+ return int16(s), nil -+ case int16: -+ return s, nil -+ case int8: -+ return int16(s), nil -+ case uint: -+ return int16(s), nil -+ case uint64: -+ return int16(s), nil -+ case uint32: -+ return int16(s), nil -+ case uint16: -+ return int16(s), nil -+ case uint8: -+ return int16(s), nil -+ case float64: -+ return int16(s), nil -+ case float32: -+ return int16(s), nil -+ case string: -+ v, err := strconv.ParseInt(s, 0, 0) -+ if err == nil { -+ return int16(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int16"", i, i) -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int16"", i, i) -+ } -+} -+ -+// ToInt8E casts an interface to an int8 type. 
-+func ToInt8E(i interface{}) (int8, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case int: -+ return int8(s), nil -+ case int64: -+ return int8(s), nil -+ case int32: -+ return int8(s), nil -+ case int16: -+ return int8(s), nil -+ case int8: -+ return s, nil -+ case uint: -+ return int8(s), nil -+ case uint64: -+ return int8(s), nil -+ case uint32: -+ return int8(s), nil -+ case uint16: -+ return int8(s), nil -+ case uint8: -+ return int8(s), nil -+ case float64: -+ return int8(s), nil -+ case float32: -+ return int8(s), nil -+ case string: -+ v, err := strconv.ParseInt(s, 0, 0) -+ if err == nil { -+ return int8(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int8"", i, i) -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int8"", i, i) -+ } -+} -+ -+// ToIntE casts an interface to an int type. -+func ToIntE(i interface{}) (int, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case int: -+ return s, nil -+ case int64: -+ return int(s), nil -+ case int32: -+ return int(s), nil -+ case int16: -+ return int(s), nil -+ case int8: -+ return int(s), nil -+ case uint: -+ return int(s), nil -+ case uint64: -+ return int(s), nil -+ case uint32: -+ return int(s), nil -+ case uint16: -+ return int(s), nil -+ case uint8: -+ return int(s), nil -+ case float64: -+ return int(s), nil -+ case float32: -+ return int(s), nil -+ case string: -+ v, err := strconv.ParseInt(s, 0, 0) -+ if err == nil { -+ return int(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int"", i, i) -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to int"", i, i) -+ } -+} -+ -+// ToUintE casts an interface to a uint type. -+func ToUintE(i interface{}) (uint, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case string: -+ v, err := strconv.ParseUint(s, 0, 0) -+ if err == nil { -+ return uint(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v to uint: %s"", i, err) -+ case int: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint(s), nil -+ case int64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint(s), nil -+ case int32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint(s), nil -+ case int16: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint(s), nil -+ case int8: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint(s), nil -+ case uint: -+ return s, nil -+ case uint64: -+ return uint(s), nil -+ case uint32: -+ return uint(s), nil -+ case uint16: -+ return uint(s), nil -+ case uint8: -+ return uint(s), nil -+ case float64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint(s), nil -+ case float32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint(s), nil -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to uint"", i, i) -+ } -+} -+ -+// ToUint64E casts an interface to a uint64 type. 
-+func ToUint64E(i interface{}) (uint64, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case string: -+ v, err := strconv.ParseUint(s, 0, 64) -+ if err == nil { -+ return v, nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v to uint64: %s"", i, err) -+ case int: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint64(s), nil -+ case int64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint64(s), nil -+ case int32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint64(s), nil -+ case int16: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint64(s), nil -+ case int8: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint64(s), nil -+ case uint: -+ return uint64(s), nil -+ case uint64: -+ return s, nil -+ case uint32: -+ return uint64(s), nil -+ case uint16: -+ return uint64(s), nil -+ case uint8: -+ return uint64(s), nil -+ case float32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint64(s), nil -+ case float64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint64(s), nil -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to uint64"", i, i) -+ } -+} -+ -+// ToUint32E casts an interface to a uint32 type. -+func ToUint32E(i interface{}) (uint32, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case string: -+ v, err := strconv.ParseUint(s, 0, 32) -+ if err == nil { -+ return uint32(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v to uint32: %s"", i, err) -+ case int: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint32(s), nil -+ case int64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint32(s), nil -+ case int32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint32(s), nil -+ case int16: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint32(s), nil -+ case int8: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint32(s), nil -+ case uint: -+ return uint32(s), nil -+ case uint64: -+ return uint32(s), nil -+ case uint32: -+ return s, nil -+ case uint16: -+ return uint32(s), nil -+ case uint8: -+ return uint32(s), nil -+ case float64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint32(s), nil -+ case float32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint32(s), nil -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to uint32"", i, i) -+ } -+} -+ -+// ToUint16E casts an interface to a uint16 type. 
-+func ToUint16E(i interface{}) (uint16, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case string: -+ v, err := strconv.ParseUint(s, 0, 16) -+ if err == nil { -+ return uint16(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v to uint16: %s"", i, err) -+ case int: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint16(s), nil -+ case int64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint16(s), nil -+ case int32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint16(s), nil -+ case int16: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint16(s), nil -+ case int8: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint16(s), nil -+ case uint: -+ return uint16(s), nil -+ case uint64: -+ return uint16(s), nil -+ case uint32: -+ return uint16(s), nil -+ case uint16: -+ return s, nil -+ case uint8: -+ return uint16(s), nil -+ case float64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint16(s), nil -+ case float32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint16(s), nil -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to uint16"", i, i) -+ } -+} -+ -+// ToUint8E casts an interface to a uint type. -+func ToUint8E(i interface{}) (uint8, error) { -+ i = indirect(i) -+ -+ switch s := i.(type) { -+ case string: -+ v, err := strconv.ParseUint(s, 0, 8) -+ if err == nil { -+ return uint8(v), nil -+ } -+ return 0, fmt.Errorf(""unable to cast %#v to uint8: %s"", i, err) -+ case int: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint8(s), nil -+ case int64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint8(s), nil -+ case int32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint8(s), nil -+ case int16: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint8(s), nil -+ case int8: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint8(s), nil -+ case uint: -+ return uint8(s), nil -+ case uint64: -+ return uint8(s), nil -+ case uint32: -+ return uint8(s), nil -+ case uint16: -+ return uint8(s), nil -+ case uint8: -+ return s, nil -+ case float64: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint8(s), nil -+ case float32: -+ if s < 0 { -+ return 0, errNegativeNotAllowed -+ } -+ return uint8(s), nil -+ case bool: -+ if s { -+ return 1, nil -+ } -+ return 0, nil -+ case nil: -+ return 0, nil -+ default: -+ return 0, fmt.Errorf(""unable to cast %#v of type %T to uint8"", i, i) -+ } -+} -+ -+// From html/template/content.go -+// Copyright 2011 The Go Authors. All rights reserved. -+// indirect returns the value, after dereferencing as many times -+// as necessary to reach the base type (or nil). -+func indirect(a interface{}) interface{} { -+ if a == nil { -+ return nil -+ } -+ if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { -+ // Avoid creating a reflect.Value if it's not a pointer. -+ return a -+ } -+ v := reflect.ValueOf(a) -+ for v.Kind() == reflect.Ptr && !v.IsNil() { -+ v = v.Elem() -+ } -+ return v.Interface() -+} -+ -+// From html/template/content.go -+// Copyright 2011 The Go Authors. All rights reserved. 
-+// indirectToStringerOrError returns the value, after dereferencing as many times -+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -+// or error, -+func indirectToStringerOrError(a interface{}) interface{} { -+ if a == nil { -+ return nil -+ } -+ -+ var errorType = reflect.TypeOf((*error)(nil)).Elem() -+ var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -+ -+ v := reflect.ValueOf(a) -+ for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { -+ v = v.Elem() -+ } -+ return v.Interface() -+} -+ -+// ToStringE casts an interface to a string type. -+func ToStringE(i interface{}) (string, error) { -+ i = indirectToStringerOrError(i) -+ -+ switch s := i.(type) { -+ case string: -+ return s, nil -+ case bool: -+ return strconv.FormatBool(s), nil -+ case float64: -+ return strconv.FormatFloat(s, 'f', -1, 64), nil -+ case float32: -+ return strconv.FormatFloat(float64(s), 'f', -1, 32), nil -+ case int: -+ return strconv.Itoa(s), nil -+ case int64: -+ return strconv.FormatInt(s, 10), nil -+ case int32: -+ return strconv.Itoa(int(s)), nil -+ case int16: -+ return strconv.FormatInt(int64(s), 10), nil -+ case int8: -+ return strconv.FormatInt(int64(s), 10), nil -+ case uint: -+ return strconv.FormatUint(uint64(s), 10), nil -+ case uint64: -+ return strconv.FormatUint(uint64(s), 10), nil -+ case uint32: -+ return strconv.FormatUint(uint64(s), 10), nil -+ case uint16: -+ return strconv.FormatUint(uint64(s), 10), nil -+ case uint8: -+ return strconv.FormatUint(uint64(s), 10), nil -+ case []byte: -+ return string(s), nil -+ case template.HTML: -+ return string(s), nil -+ case template.URL: -+ return string(s), nil -+ case template.JS: -+ return string(s), nil -+ case template.CSS: -+ return string(s), nil -+ case template.HTMLAttr: -+ return string(s), nil -+ case nil: -+ return """", nil -+ case fmt.Stringer: -+ return s.String(), nil -+ case error: -+ return s.Error(), nil -+ default: -+ return """", fmt.Errorf(""unable to cast %#v of type %T to string"", i, i) -+ } -+} -+ -+// ToStringMapStringE casts an interface to a map[string]string type. -+func ToStringMapStringE(i interface{}) (map[string]string, error) { -+ var m = map[string]string{} -+ -+ switch v := i.(type) { -+ case map[string]string: -+ return v, nil -+ case map[string]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToString(val) -+ } -+ return m, nil -+ case map[interface{}]string: -+ for k, val := range v { -+ m[ToString(k)] = ToString(val) -+ } -+ return m, nil -+ case map[interface{}]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToString(val) -+ } -+ return m, nil -+ case string: -+ err := jsonStringToObject(v, &m) -+ return m, err -+ default: -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]string"", i, i) -+ } -+} -+ -+// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { -+ var m = map[string][]string{} -+ -+ switch v := i.(type) { -+ case map[string][]string: -+ return v, nil -+ case map[string][]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToStringSlice(val) -+ } -+ return m, nil -+ case map[string]string: -+ for k, val := range v { -+ m[ToString(k)] = []string{val} -+ } -+ case map[string]interface{}: -+ for k, val := range v { -+ switch vt := val.(type) { -+ case []interface{}: -+ m[ToString(k)] = ToStringSlice(vt) -+ case []string: -+ m[ToString(k)] = vt -+ default: -+ m[ToString(k)] = []string{ToString(val)} -+ } -+ } -+ return m, nil -+ case map[interface{}][]string: -+ for k, val := range v { -+ m[ToString(k)] = ToStringSlice(val) -+ } -+ return m, nil -+ case map[interface{}]string: -+ for k, val := range v { -+ m[ToString(k)] = ToStringSlice(val) -+ } -+ return m, nil -+ case map[interface{}][]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToStringSlice(val) -+ } -+ return m, nil -+ case map[interface{}]interface{}: -+ for k, val := range v { -+ key, err := ToStringE(k) -+ if err != nil { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string][]string"", i, i) -+ } -+ value, err := ToStringSliceE(val) -+ if err != nil { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string][]string"", i, i) -+ } -+ m[key] = value -+ } -+ case string: -+ err := jsonStringToObject(v, &m) -+ return m, err -+ default: -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string][]string"", i, i) -+ } -+ return m, nil -+} -+ -+// ToStringMapBoolE casts an interface to a map[string]bool type. -+func ToStringMapBoolE(i interface{}) (map[string]bool, error) { -+ var m = map[string]bool{} -+ -+ switch v := i.(type) { -+ case map[interface{}]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToBool(val) -+ } -+ return m, nil -+ case map[string]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToBool(val) -+ } -+ return m, nil -+ case map[string]bool: -+ return v, nil -+ case string: -+ err := jsonStringToObject(v, &m) -+ return m, err -+ default: -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]bool"", i, i) -+ } -+} -+ -+// ToStringMapE casts an interface to a map[string]interface{} type. -+func ToStringMapE(i interface{}) (map[string]interface{}, error) { -+ var m = map[string]interface{}{} -+ -+ switch v := i.(type) { -+ case map[interface{}]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = val -+ } -+ return m, nil -+ case map[string]interface{}: -+ return v, nil -+ case string: -+ err := jsonStringToObject(v, &m) -+ return m, err -+ default: -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]interface{}"", i, i) -+ } -+} -+ -+// ToStringMapIntE casts an interface to a map[string]int{} type. 
-+func ToStringMapIntE(i interface{}) (map[string]int, error) { -+ var m = map[string]int{} -+ if i == nil { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]int"", i, i) -+ } -+ -+ switch v := i.(type) { -+ case map[interface{}]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToInt(val) -+ } -+ return m, nil -+ case map[string]interface{}: -+ for k, val := range v { -+ m[k] = ToInt(val) -+ } -+ return m, nil -+ case map[string]int: -+ return v, nil -+ case string: -+ err := jsonStringToObject(v, &m) -+ return m, err -+ } -+ -+ if reflect.TypeOf(i).Kind() != reflect.Map { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]int"", i, i) -+ } -+ -+ mVal := reflect.ValueOf(m) -+ v := reflect.ValueOf(i) -+ for _, keyVal := range v.MapKeys() { -+ val, err := ToIntE(v.MapIndex(keyVal).Interface()) -+ if err != nil { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]int"", i, i) -+ } -+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) -+ } -+ return m, nil -+} -+ -+// ToStringMapInt64E casts an interface to a map[string]int64{} type. -+func ToStringMapInt64E(i interface{}) (map[string]int64, error) { -+ var m = map[string]int64{} -+ if i == nil { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]int64"", i, i) -+ } -+ -+ switch v := i.(type) { -+ case map[interface{}]interface{}: -+ for k, val := range v { -+ m[ToString(k)] = ToInt64(val) -+ } -+ return m, nil -+ case map[string]interface{}: -+ for k, val := range v { -+ m[k] = ToInt64(val) -+ } -+ return m, nil -+ case map[string]int64: -+ return v, nil -+ case string: -+ err := jsonStringToObject(v, &m) -+ return m, err -+ } -+ -+ if reflect.TypeOf(i).Kind() != reflect.Map { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]int64"", i, i) -+ } -+ mVal := reflect.ValueOf(m) -+ v := reflect.ValueOf(i) -+ for _, keyVal := range v.MapKeys() { -+ val, err := ToInt64E(v.MapIndex(keyVal).Interface()) -+ if err != nil { -+ return m, fmt.Errorf(""unable to cast %#v of type %T to map[string]int64"", i, i) -+ } -+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) -+ } -+ return m, nil -+} -+ -+// ToSliceE casts an interface to a []interface{} type. -+func ToSliceE(i interface{}) ([]interface{}, error) { -+ var s []interface{} -+ -+ switch v := i.(type) { -+ case []interface{}: -+ return append(s, v...), nil -+ case []map[string]interface{}: -+ for _, u := range v { -+ s = append(s, u) -+ } -+ return s, nil -+ default: -+ return s, fmt.Errorf(""unable to cast %#v of type %T to []interface{}"", i, i) -+ } -+} -+ -+// ToBoolSliceE casts an interface to a []bool type. -+func ToBoolSliceE(i interface{}) ([]bool, error) { -+ if i == nil { -+ return []bool{}, fmt.Errorf(""unable to cast %#v of type %T to []bool"", i, i) -+ } -+ -+ switch v := i.(type) { -+ case []bool: -+ return v, nil -+ } -+ -+ kind := reflect.TypeOf(i).Kind() -+ switch kind { -+ case reflect.Slice, reflect.Array: -+ s := reflect.ValueOf(i) -+ a := make([]bool, s.Len()) -+ for j := 0; j < s.Len(); j++ { -+ val, err := ToBoolE(s.Index(j).Interface()) -+ if err != nil { -+ return []bool{}, fmt.Errorf(""unable to cast %#v of type %T to []bool"", i, i) -+ } -+ a[j] = val -+ } -+ return a, nil -+ default: -+ return []bool{}, fmt.Errorf(""unable to cast %#v of type %T to []bool"", i, i) -+ } -+} -+ -+// ToStringSliceE casts an interface to a []string type. 
-+func ToStringSliceE(i interface{}) ([]string, error) { -+ var a []string -+ -+ switch v := i.(type) { -+ case []interface{}: -+ for _, u := range v { -+ a = append(a, ToString(u)) -+ } -+ return a, nil -+ case []string: -+ return v, nil -+ case string: -+ return strings.Fields(v), nil -+ case interface{}: -+ str, err := ToStringE(v) -+ if err != nil { -+ return a, fmt.Errorf(""unable to cast %#v of type %T to []string"", i, i) -+ } -+ return []string{str}, nil -+ default: -+ return a, fmt.Errorf(""unable to cast %#v of type %T to []string"", i, i) -+ } -+} -+ -+// ToIntSliceE casts an interface to a []int type. -+func ToIntSliceE(i interface{}) ([]int, error) { -+ if i == nil { -+ return []int{}, fmt.Errorf(""unable to cast %#v of type %T to []int"", i, i) -+ } -+ -+ switch v := i.(type) { -+ case []int: -+ return v, nil -+ } -+ -+ kind := reflect.TypeOf(i).Kind() -+ switch kind { -+ case reflect.Slice, reflect.Array: -+ s := reflect.ValueOf(i) -+ a := make([]int, s.Len()) -+ for j := 0; j < s.Len(); j++ { -+ val, err := ToIntE(s.Index(j).Interface()) -+ if err != nil { -+ return []int{}, fmt.Errorf(""unable to cast %#v of type %T to []int"", i, i) -+ } -+ a[j] = val -+ } -+ return a, nil -+ default: -+ return []int{}, fmt.Errorf(""unable to cast %#v of type %T to []int"", i, i) -+ } -+} -+ -+// ToDurationSliceE casts an interface to a []time.Duration type. -+func ToDurationSliceE(i interface{}) ([]time.Duration, error) { -+ if i == nil { -+ return []time.Duration{}, fmt.Errorf(""unable to cast %#v of type %T to []time.Duration"", i, i) -+ } -+ -+ switch v := i.(type) { -+ case []time.Duration: -+ return v, nil -+ } -+ -+ kind := reflect.TypeOf(i).Kind() -+ switch kind { -+ case reflect.Slice, reflect.Array: -+ s := reflect.ValueOf(i) -+ a := make([]time.Duration, s.Len()) -+ for j := 0; j < s.Len(); j++ { -+ val, err := ToDurationE(s.Index(j).Interface()) -+ if err != nil { -+ return []time.Duration{}, fmt.Errorf(""unable to cast %#v of type %T to []time.Duration"", i, i) -+ } -+ a[j] = val -+ } -+ return a, nil -+ default: -+ return []time.Duration{}, fmt.Errorf(""unable to cast %#v of type %T to []time.Duration"", i, i) -+ } -+} -+ -+// StringToDate attempts to parse a string into a time.Time type using a -+// predefined list of formats. If no suitable format is found, an error is -+// returned. -+func StringToDate(s string) (time.Time, error) { -+ return parseDateWith(s, []string{ -+ time.RFC3339, -+ ""2006-01-02T15:04:05"", // iso8601 without timezone -+ time.RFC1123Z, -+ time.RFC1123, -+ time.RFC822Z, -+ time.RFC822, -+ time.RFC850, -+ time.ANSIC, -+ time.UnixDate, -+ time.RubyDate, -+ ""2006-01-02 15:04:05.999999999 -0700 MST"", // Time.String() -+ ""2006-01-02"", -+ ""02 Jan 2006"", -+ ""2006-01-02T15:04:05-0700"", // RFC3339 without timezone hh:mm colon -+ ""2006-01-02 15:04:05 -07:00"", -+ ""2006-01-02 15:04:05 -0700"", -+ ""2006-01-02 15:04:05Z07:00"", // RFC3339 without T -+ ""2006-01-02 15:04:05Z0700"", // RFC3339 without T or timezone hh:mm colon -+ ""2006-01-02 15:04:05"", -+ time.Kitchen, -+ time.Stamp, -+ time.StampMilli, -+ time.StampMicro, -+ time.StampNano, -+ }) -+} -+ -+func parseDateWith(s string, dates []string) (d time.Time, e error) { -+ for _, dateType := range dates { -+ if d, e = time.Parse(dateType, s); e == nil { -+ return -+ } -+ } -+ return d, fmt.Errorf(""unable to parse date: %s"", s) -+} -+ -+// jsonStringToObject attempts to unmarshall a string as JSON into -+// the object passed as pointer. 
-+func jsonStringToObject(s string, v interface{}) error { -+ data := []byte(s) -+ return json.Unmarshal(data, v) -+} -diff --git a/vendor/github.com/spf13/cast/go.mod b/vendor/github.com/spf13/cast/go.mod -new file mode 100644 -index 0000000000000..c1c0232dd9371 ---- /dev/null -+++ b/vendor/github.com/spf13/cast/go.mod -@@ -0,0 +1,7 @@ -+module github.com/spf13/cast -+ -+require ( -+ github.com/davecgh/go-spew v1.1.1 // indirect -+ github.com/pmezard/go-difflib v1.0.0 // indirect -+ github.com/stretchr/testify v1.2.2 -+) -diff --git a/vendor/github.com/spf13/cast/go.sum b/vendor/github.com/spf13/cast/go.sum -new file mode 100644 -index 0000000000000..e03ee77d9e3b1 ---- /dev/null -+++ b/vendor/github.com/spf13/cast/go.sum -@@ -0,0 +1,6 @@ -+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go -new file mode 100644 -index 0000000000000..593f6530084f2 ---- /dev/null -+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go -@@ -0,0 +1,77 @@ -+// Copyright 2012 The Go Authors. All rights reserved. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. -+ -+/* -+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -+2898 / PKCS #5 v2.0. -+ -+A key derivation function is useful when encrypting data based on a password -+or any other not-fully-random data. It uses a pseudorandom function to derive -+a secure encryption key based on the password. -+ -+While v2.0 of the standard defines only one pseudorandom function to use, -+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -+choose, you can pass the `New` functions from the different SHA packages to -+pbkdf2.Key. -+*/ -+package pbkdf2 // import ""golang.org/x/crypto/pbkdf2"" -+ -+import ( -+ ""crypto/hmac"" -+ ""hash"" -+) -+ -+// Key derives a key from the password, salt and iteration count, returning a -+// []byte of length keylen that can be used as cryptographic key. The key is -+// derived based on the method described as PBKDF2 with the HMAC variant using -+// the supplied hash function. -+// -+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -+// doing: -+// -+// dk := pbkdf2.Key([]byte(""some password""), salt, 4096, 32, sha1.New) -+// -+// Remember to get a good random salt. At least 8 bytes is recommended by the -+// RFC. -+// -+// Using a higher iteration count will increase the cost of an exhaustive -+// search but will also make derivation proportionally slower. 
-+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { -+ prf := hmac.New(h, password) -+ hashLen := prf.Size() -+ numBlocks := (keyLen + hashLen - 1) / hashLen -+ -+ var buf [4]byte -+ dk := make([]byte, 0, numBlocks*hashLen) -+ U := make([]byte, hashLen) -+ for block := 1; block <= numBlocks; block++ { -+ // N.B.: || means concatenation, ^ means XOR -+ // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter -+ // U_1 = PRF(password, salt || uint(i)) -+ prf.Reset() -+ prf.Write(salt) -+ buf[0] = byte(block >> 24) -+ buf[1] = byte(block >> 16) -+ buf[2] = byte(block >> 8) -+ buf[3] = byte(block) -+ prf.Write(buf[:4]) -+ dk = prf.Sum(dk) -+ T := dk[len(dk)-hashLen:] -+ copy(U, T) -+ -+ // U_n = PRF(password, U_(n-1)) -+ for n := 2; n <= iter; n++ { -+ prf.Reset() -+ prf.Write(U) -+ U = U[:0] -+ U = prf.Sum(U) -+ for x := range U { -+ T[x] ^= U[x] -+ } -+ } -+ } -+ return dk[:keyLen] -+} -diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go -new file mode 100644 -index 0000000000000..2f81fe4148e95 ---- /dev/null -+++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go -@@ -0,0 +1,213 @@ -+// Copyright 2012 The Go Authors. All rights reserved. -+// Use of this source code is governed by a BSD-style -+// license that can be found in the LICENSE file. -+ -+// Package scrypt implements the scrypt key derivation function as defined in -+// Colin Percival's paper ""Stronger Key Derivation via Sequential Memory-Hard -+// Functions"" (https://www.tarsnap.com/scrypt/scrypt.pdf). -+package scrypt // import ""golang.org/x/crypto/scrypt"" -+ -+import ( -+ ""crypto/sha256"" -+ ""errors"" -+ ""math/bits"" -+ -+ ""golang.org/x/crypto/pbkdf2"" -+) -+ -+const maxInt = int(^uint(0) >> 1) -+ -+// blockCopy copies n numbers from src into dst. -+func blockCopy(dst, src []uint32, n int) { -+ copy(dst, src[:n]) -+} -+ -+// blockXOR XORs numbers from dst with n numbers from src. -+func blockXOR(dst, src []uint32, n int) { -+ for i, v := range src[:n] { -+ dst[i] ^= v -+ } -+} -+ -+// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, -+// and puts the result into both tmp and out. 
-+func salsaXOR(tmp *[16]uint32, in, out []uint32) { -+ w0 := tmp[0] ^ in[0] -+ w1 := tmp[1] ^ in[1] -+ w2 := tmp[2] ^ in[2] -+ w3 := tmp[3] ^ in[3] -+ w4 := tmp[4] ^ in[4] -+ w5 := tmp[5] ^ in[5] -+ w6 := tmp[6] ^ in[6] -+ w7 := tmp[7] ^ in[7] -+ w8 := tmp[8] ^ in[8] -+ w9 := tmp[9] ^ in[9] -+ w10 := tmp[10] ^ in[10] -+ w11 := tmp[11] ^ in[11] -+ w12 := tmp[12] ^ in[12] -+ w13 := tmp[13] ^ in[13] -+ w14 := tmp[14] ^ in[14] -+ w15 := tmp[15] ^ in[15] -+ -+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 -+ x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 -+ -+ for i := 0; i < 8; i += 2 { -+ x4 ^= bits.RotateLeft32(x0+x12, 7) -+ x8 ^= bits.RotateLeft32(x4+x0, 9) -+ x12 ^= bits.RotateLeft32(x8+x4, 13) -+ x0 ^= bits.RotateLeft32(x12+x8, 18) -+ -+ x9 ^= bits.RotateLeft32(x5+x1, 7) -+ x13 ^= bits.RotateLeft32(x9+x5, 9) -+ x1 ^= bits.RotateLeft32(x13+x9, 13) -+ x5 ^= bits.RotateLeft32(x1+x13, 18) -+ -+ x14 ^= bits.RotateLeft32(x10+x6, 7) -+ x2 ^= bits.RotateLeft32(x14+x10, 9) -+ x6 ^= bits.RotateLeft32(x2+x14, 13) -+ x10 ^= bits.RotateLeft32(x6+x2, 18) -+ -+ x3 ^= bits.RotateLeft32(x15+x11, 7) -+ x7 ^= bits.RotateLeft32(x3+x15, 9) -+ x11 ^= bits.RotateLeft32(x7+x3, 13) -+ x15 ^= bits.RotateLeft32(x11+x7, 18) -+ -+ x1 ^= bits.RotateLeft32(x0+x3, 7) -+ x2 ^= bits.RotateLeft32(x1+x0, 9) -+ x3 ^= bits.RotateLeft32(x2+x1, 13) -+ x0 ^= bits.RotateLeft32(x3+x2, 18) -+ -+ x6 ^= bits.RotateLeft32(x5+x4, 7) -+ x7 ^= bits.RotateLeft32(x6+x5, 9) -+ x4 ^= bits.RotateLeft32(x7+x6, 13) -+ x5 ^= bits.RotateLeft32(x4+x7, 18) -+ -+ x11 ^= bits.RotateLeft32(x10+x9, 7) -+ x8 ^= bits.RotateLeft32(x11+x10, 9) -+ x9 ^= bits.RotateLeft32(x8+x11, 13) -+ x10 ^= bits.RotateLeft32(x9+x8, 18) -+ -+ x12 ^= bits.RotateLeft32(x15+x14, 7) -+ x13 ^= bits.RotateLeft32(x12+x15, 9) -+ x14 ^= bits.RotateLeft32(x13+x12, 13) -+ x15 ^= bits.RotateLeft32(x14+x13, 18) -+ } -+ x0 += w0 -+ x1 += w1 -+ x2 += w2 -+ x3 += w3 -+ x4 += w4 -+ x5 += w5 -+ x6 += w6 -+ x7 += w7 -+ x8 += w8 -+ x9 += w9 -+ x10 += w10 -+ x11 += w11 -+ x12 += w12 -+ x13 += w13 -+ x14 += w14 -+ x15 += w15 -+ -+ out[0], tmp[0] = x0, x0 -+ out[1], tmp[1] = x1, x1 -+ out[2], tmp[2] = x2, x2 -+ out[3], tmp[3] = x3, x3 -+ out[4], tmp[4] = x4, x4 -+ out[5], tmp[5] = x5, x5 -+ out[6], tmp[6] = x6, x6 -+ out[7], tmp[7] = x7, x7 -+ out[8], tmp[8] = x8, x8 -+ out[9], tmp[9] = x9, x9 -+ out[10], tmp[10] = x10, x10 -+ out[11], tmp[11] = x11, x11 -+ out[12], tmp[12] = x12, x12 -+ out[13], tmp[13] = x13, x13 -+ out[14], tmp[14] = x14, x14 -+ out[15], tmp[15] = x15, x15 -+} -+ -+func blockMix(tmp *[16]uint32, in, out []uint32, r int) { -+ blockCopy(tmp[:], in[(2*r-1)*16:], 16) -+ for i := 0; i < 2*r; i += 2 { -+ salsaXOR(tmp, in[i*16:], out[i*8:]) -+ salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) -+ } -+} -+ -+func integer(b []uint32, r int) uint64 { -+ j := (2*r - 1) * 16 -+ return uint64(b[j]) | uint64(b[j+1])<<32 -+} -+ -+func smix(b []byte, r, N int, v, xy []uint32) { -+ var tmp [16]uint32 -+ x := xy -+ y := xy[32*r:] -+ -+ j := 0 -+ for i := 0; i < 32*r; i++ { -+ x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24 -+ j += 4 -+ } -+ for i := 0; i < N; i += 2 { -+ blockCopy(v[i*(32*r):], x, 32*r) -+ blockMix(&tmp, x, y, r) -+ -+ blockCopy(v[(i+1)*(32*r):], y, 32*r) -+ blockMix(&tmp, y, x, r) -+ } -+ for i := 0; i < N; i += 2 { -+ j := int(integer(x, r) & uint64(N-1)) -+ blockXOR(x, v[j*(32*r):], 32*r) -+ blockMix(&tmp, x, y, r) -+ -+ j = int(integer(y, r) & uint64(N-1)) -+ blockXOR(y, v[j*(32*r):], 32*r) -+ 
blockMix(&tmp, y, x, r) -+ } -+ j = 0 -+ for _, v := range x[:32*r] { -+ b[j+0] = byte(v >> 0) -+ b[j+1] = byte(v >> 8) -+ b[j+2] = byte(v >> 16) -+ b[j+3] = byte(v >> 24) -+ j += 4 -+ } -+} -+ -+// Key derives a key from the password, salt, and cost parameters, returning -+// a byte slice of length keyLen that can be used as cryptographic key. -+// -+// N is a CPU/memory cost parameter, which must be a power of two greater than 1. -+// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the -+// limits, the function returns a nil byte slice and an error. -+// -+// For example, you can get a derived key for e.g. AES-256 (which needs a -+// 32-byte key) by doing: -+// -+// dk, err := scrypt.Key([]byte(""some password""), salt, 32768, 8, 1, 32) -+// -+// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 -+// and p=1. The parameters N, r, and p should be increased as memory latency and -+// CPU parallelism increases; consider setting N to the highest power of 2 you -+// can derive within 100 milliseconds. Remember to get a good random salt. -+func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { -+ if N <= 1 || N&(N-1) != 0 { -+ return nil, errors.New(""scrypt: N must be > 1 and a power of 2"") -+ } -+ if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { -+ return nil, errors.New(""scrypt: parameters are too large"") -+ } -+ -+ xy := make([]uint32, 64*r) -+ v := make([]uint32, 32*N*r) -+ b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) -+ -+ for i := 0; i < p; i++ { -+ smix(b[i*128*r:], r, N, v, xy) -+ } -+ -+ return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 4a8dd327f0a1c..6def38fbedbf5 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -49,6 +49,13 @@ github.com/Azure/go-autorest/autorest/validation - github.com/Azure/go-autorest/logger - # github.com/Azure/go-autorest/tracing v0.6.0 - github.com/Azure/go-autorest/tracing -+# github.com/Masterminds/goutils v1.1.1 -+github.com/Masterminds/goutils -+# github.com/Masterminds/semver/v3 v3.1.1 -+github.com/Masterminds/semver/v3 -+# github.com/Masterminds/sprig/v3 v3.2.2 -+## explicit -+github.com/Masterminds/sprig/v3 - # github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 - github.com/Masterminds/squirrel - # github.com/Microsoft/go-winio v0.4.16 -@@ -563,7 +570,9 @@ github.com/hpcloud/tail/ratelimiter - github.com/hpcloud/tail/util - github.com/hpcloud/tail/watch - github.com/hpcloud/tail/winfile --# github.com/imdario/mergo v0.3.9 -+# github.com/huandu/xstrings v1.3.1 -+github.com/huandu/xstrings -+# github.com/imdario/mergo v0.3.11 - ## explicit - github.com/imdario/mergo - # github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48 -@@ -650,11 +659,15 @@ github.com/minio/minio-go/v7/pkg/sse - github.com/minio/minio-go/v7/pkg/tags - # github.com/minio/sha256-simd v0.1.1 - github.com/minio/sha256-simd -+# github.com/mitchellh/copystructure v1.0.0 -+github.com/mitchellh/copystructure - # github.com/mitchellh/go-homedir v1.1.0 - github.com/mitchellh/go-homedir - # github.com/mitchellh/mapstructure v1.3.3 - ## explicit - github.com/mitchellh/mapstructure -+# github.com/mitchellh/reflectwalk v1.0.1 -+github.com/mitchellh/reflectwalk - # github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 - github.com/moby/term - github.com/moby/term/windows -@@ -841,6 +854,8 @@ github.com/sean-/seed - github.com/segmentio/fasthash/fnv1a - # 
github.com/sercand/kuberesolver v2.4.0+incompatible => github.com/sercand/kuberesolver v2.4.0+incompatible - github.com/sercand/kuberesolver -+# github.com/shopspring/decimal v1.2.0 -+github.com/shopspring/decimal - # github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 - ## explicit - github.com/shurcooL/httpfs/filter -@@ -859,6 +874,8 @@ github.com/sony/gobreaker - ## explicit - github.com/spf13/afero - github.com/spf13/afero/mem -+# github.com/spf13/cast v1.3.1 -+github.com/spf13/cast - # github.com/spf13/pflag v1.0.5 - github.com/spf13/pflag - # github.com/stretchr/objx v0.2.0 -@@ -1109,8 +1126,10 @@ golang.org/x/crypto/blake2b - golang.org/x/crypto/blowfish - golang.org/x/crypto/ed25519 - golang.org/x/crypto/ed25519/internal/edwards25519 -+golang.org/x/crypto/pbkdf2 - golang.org/x/crypto/pkcs12 - golang.org/x/crypto/pkcs12/internal/rc2 -+golang.org/x/crypto/scrypt - golang.org/x/crypto/sha3 - golang.org/x/crypto/ssh/terminal - # golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5",unknown,"support math functions in line_format and label_format (#3434) - -* support math functions in line_format and label_format - -* fix lint and tests - -* added docs and more tests - -* update doc - -* remove toString and int64 - -* doc adjustement. - -Signed-off-by: Cyril Tovena - -Co-authored-by: Cyril Tovena " -c1ca782dbd4fa3515ad7dcc107d1306224c64f33,2023-04-04 20:27:02,Periklis Tsirakidis,operator: Remove static placeholder suffix for openshift bundle (#8998),False,"diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md -index da3fe440e19ed..bcc54554e2a67 100644 ---- a/operator/CHANGELOG.md -+++ b/operator/CHANGELOG.md -@@ -1,5 +1,6 @@ - ## Main - -+- [8998](https://github.com/grafana/loki/pull/8998) **periklis**: Remove static placeholder suffix for openshift bundle - - [8930](https://github.com/grafana/loki/pull/8930) **periklis**: Fix makefile target operatorhub - - [8911](https://github.com/grafana/loki/pull/8911) **aminesnow**: Update LokiStack annotaion on RulerConfig delete - -diff --git a/operator/Makefile b/operator/Makefile -index ff1594d2a7bff..ec0ce23b56e3d 100644 ---- a/operator/Makefile -+++ b/operator/Makefile -@@ -41,7 +41,7 @@ ifeq ($(VARIANT), openshift) - ifeq ($(REGISTRY_BASE), $(REGISTRY_BASE_COMMUNITY)) - REGISTRY_BASE = $(REGISTRY_BASE_OPENSHIFT) - endif -- VERSION = v0.1.0-placeholder -+ VERSION = v0.1.0 - CHANNELS = stable - DEFAULT_CHANNEL = stable - LOKI_OPERATOR_NS = openshift-operators-redhat -diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -index a2a8cec876047..2e85a1bf7f8ad 100644 ---- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -@@ -150,7 +150,7 @@ metadata: - categories: OpenShift Optional, Logging & Tracing - certified: ""false"" - containerImage: quay.io/openshift-logging/loki-operator:v0.1.0 -- createdAt: ""2023-03-27T19:03:01Z"" -+ createdAt: ""2023-04-03T19:44:20Z"" - description: | - The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. 
- ## Prerequisites and Requirements -@@ -173,7 +173,7 @@ metadata: - operatorframework.io/arch.arm64: supported - operatorframework.io/arch.ppc64le: supported - operatorframework.io/arch.s390x: supported -- name: loki-operator.v0.1.0-placeholder -+ name: loki-operator.v0.1.0 - namespace: placeholder - spec: - apiservicedefinitions: {} -@@ -1486,7 +1486,7 @@ spec: - value: quay.io/observatorium/api:latest - - name: RELATED_IMAGE_OPA - value: quay.io/observatorium/opa-openshift:latest -- image: quay.io/openshift-logging/loki-operator:v0.1.0-placeholder -+ image: quay.io/openshift-logging/loki-operator:v0.1.0 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: -@@ -1608,7 +1608,7 @@ spec: - name: gateway - - image: quay.io/observatorium/opa-openshift:latest - name: opa -- version: 0.1.0-placeholder -+ version: 0.1.0 - webhookdefinitions: - - admissionReviewVersions: - - v1 -diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml -index 44ceff8c779d1..86cb69733899b 100644 ---- a/operator/config/manager/kustomization.yaml -+++ b/operator/config/manager/kustomization.yaml -@@ -6,4 +6,4 @@ kind: Kustomization - images: - - name: controller - newName: quay.io/openshift-logging/loki-operator -- newTag: v0.1.0-placeholder -+ newTag: v0.1.0",operator,Remove static placeholder suffix for openshift bundle (#8998) -efdae3df14c47d627eb99e91466e0451db6e16f6,2024-05-24 01:55:50,hayden,"feat(helm): Support for PVC Annotations for Non-Distributed Modes (#12023) - -Signed-off-by: hfuss -Co-authored-by: J Stickler -Co-authored-by: Trevor Whitney ",False,"diff --git a/docs/Makefile b/docs/Makefile -index 63fc849789c11..4bed302d71794 100644 ---- a/docs/Makefile -+++ b/docs/Makefile -@@ -10,6 +10,7 @@ include docs.mk - PODMAN := $(shell if command -v podman >/dev/null 2>&1; then echo podman; else echo docker; fi) - BUILD_IN_CONTAINER ?= true - -+.PHONY: sources/setup/install/helm/reference.md - sources/setup/install/helm/reference.md: ../production/helm/loki/reference.md.gotmpl ../production/helm/loki/values.yaml - ifeq ($(BUILD_IN_CONTAINER),true) - $(PODMAN) run --rm --volume ""$(realpath ..):/helm-docs"" -u ""$$(id -u)"" ""docker.io/jnorwood/helm-docs:v1.11.0"" \ -diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md -index 53101a4832143..76b4936f20bfd 100644 ---- a/docs/sources/setup/install/helm/reference.md -+++ b/docs/sources/setup/install/helm/reference.md -@@ -315,6 +315,7 @@ This is the generated reference for the Loki Helm Chart values. - ""initContainers"": [], - ""nodeSelector"": {}, - ""persistence"": { -+ ""annotations"": {}, - ""dataVolumeParameters"": { - ""emptyDir"": {} - }, -@@ -512,6 +513,15 @@ null -
- {}
- 
-+ -+ -+ -+ backend.persistence.annotations -+ object -+ Annotations for volume claim -+
-+{}
-+
- - - -@@ -6226,6 +6236,7 @@ false - ""drivesPerNode"": 2, - ""enabled"": false, - ""persistence"": { -+ ""annotations"": {}, - ""size"": ""5Gi"" - }, - ""replicas"": 1, -@@ -8442,6 +8453,7 @@ false - ""lifecycle"": {}, - ""nodeSelector"": {}, - ""persistence"": { -+ ""annotations"": {}, - ""enableStatefulSetAutoDeletePVC"": true, - ""selector"": null, - ""size"": ""10Gi"", -@@ -8653,6 +8665,15 @@ false -
- {}
- 
-+ -+ -+ -+ read.persistence.annotations -+ object -+ Annotations for volume claim -+
-+{}
-+
- - - -@@ -9893,6 +9914,15 @@ null -
- {}
- 
-+ -+ -+ -+ singleBinary.persistence.annotations -+ object -+ Annotations for volume claim -+
-+{}
-+
- - - -@@ -10677,6 +10707,15 @@ null -
- {}
- 
-+ -+ -+ -+ write.persistence.annotations -+ object -+ Annotations for volume claim -+
-+{}
-+
- - - -diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md -index 1606c89914f88..77b801e603631 100644 ---- a/production/helm/loki/CHANGELOG.md -+++ b/production/helm/loki/CHANGELOG.md -@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang - - [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) - -+## 6.6.0 -+ -+- [ENHANCEMENT] Allow setting PVC annotations for all volume claim templates in simple scalable and single binary mode -+ - ## 6.5.2 - - - [BUGFIX] Fixed Ingress routing for all deployment modes. -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index 989a54d146a1d..637e66d70887e 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -3,7 +3,7 @@ name: loki - description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. - type: application - appVersion: 3.0.0 --version: 6.5.2 -+version: 6.6.0 - home: https://grafana.github.io/helm-charts - sources: - - https://github.com/grafana/loki -diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md -index 55a7256c72f7f..5fa6bd548bad7 100644 ---- a/production/helm/loki/README.md -+++ b/production/helm/loki/README.md -@@ -1,6 +1,6 @@ - # loki - --![Version: 6.5.2](https://img.shields.io/badge/Version-6.5.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) -+![Version: 6.6.0](https://img.shields.io/badge/Version-6.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) - - Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. - -diff --git a/production/helm/loki/templates/backend/statefulset-backend.yaml b/production/helm/loki/templates/backend/statefulset-backend.yaml -index f96f0a4d21217..534190d4a4533 100644 ---- a/production/helm/loki/templates/backend/statefulset-backend.yaml -+++ b/production/helm/loki/templates/backend/statefulset-backend.yaml -@@ -266,6 +266,10 @@ spec: - kind: PersistentVolumeClaim - metadata: - name: data -+ {{- with .Values.backend.persistence.annotations }} -+ annotations: -+ {{- toYaml . | nindent 10 }} -+ {{- end }} - spec: - accessModes: - - ReadWriteOnce -diff --git a/production/helm/loki/templates/read/statefulset-read.yaml b/production/helm/loki/templates/read/statefulset-read.yaml -index 0a31de4996dfb..7696d90e65bd6 100644 ---- a/production/helm/loki/templates/read/statefulset-read.yaml -+++ b/production/helm/loki/templates/read/statefulset-read.yaml -@@ -180,6 +180,10 @@ spec: - kind: PersistentVolumeClaim - metadata: - name: data -+ {{- with .Values.read.persistence.annotations }} -+ annotations: -+ {{- toYaml . 
| nindent 10 }} -+ {{- end }} - spec: - accessModes: - - ReadWriteOnce -diff --git a/production/helm/loki/templates/single-binary/statefulset.yaml b/production/helm/loki/templates/single-binary/statefulset.yaml -index 51c0062fc94ff..7bd2b9813f609 100644 ---- a/production/helm/loki/templates/single-binary/statefulset.yaml -+++ b/production/helm/loki/templates/single-binary/statefulset.yaml -@@ -175,6 +175,10 @@ spec: - kind: PersistentVolumeClaim - metadata: - name: storage -+ {{- with .Values.singleBinary.persistence.annotations }} -+ annotations: -+ {{- toYaml . | nindent 10 }} -+ {{- end }} - spec: - accessModes: - - ReadWriteOnce -diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml -index 54c936958b559..75605c27c26cb 100644 ---- a/production/helm/loki/templates/write/statefulset-write.yaml -+++ b/production/helm/loki/templates/write/statefulset-write.yaml -@@ -193,6 +193,10 @@ spec: - kind: PersistentVolumeClaim - metadata: - name: data -+ {{- with .Values.write.persistence.annotations }} -+ annotations: -+ {{- toYaml . | nindent 10 }} -+ {{- end }} - spec: - accessModes: - - ReadWriteOnce -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index 3edfc24ba34fb..4c70bf16fe474 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -1294,6 +1294,8 @@ singleBinary: - storageClass: null - # -- Selector for persistent disk - selector: null -+ # -- Annotations for volume claim -+ annotations: {} - ###################################################################################################################### - # - # Simple Scalable Deployment (SSD) Mode -@@ -1421,6 +1423,8 @@ write: - storageClass: null - # -- Selector for persistent disk - selector: null -+ # -- Annotations for volume claim -+ annotations: {} - # -- Configuration for the read pod(s) - read: - # -- Number of replicas for the read -@@ -1528,6 +1532,8 @@ read: - storageClass: null - # -- Selector for persistent disk - selector: null -+ # -- Annotations for volume claim -+ annotations: {} - # -- Configuration for the backend pod(s) - backend: - # -- Number of replicas for the backend -@@ -1636,6 +1642,8 @@ backend: - storageClass: null - # -- Selector for persistent disk - selector: null -+ # -- Annotations for volume claim -+ annotations: {} - ###################################################################################################################### - # - # Microservices Mode -@@ -3091,6 +3099,7 @@ minio: - purge: false - persistence: - size: 5Gi -+ annotations: {} - resources: - requests: - cpu: 100m",feat,"Support for PVC Annotations for Non-Distributed Modes (#12023) - -Signed-off-by: hfuss -Co-authored-by: J Stickler -Co-authored-by: Trevor Whitney " -e0dacd2795ebc4179e0b34250b170bb706c9ee5e,2020-05-15 02:41:48,Owen Diehl,removes yolostring (#2078),False,"diff --git a/pkg/logproto/types.go b/pkg/logproto/types.go -index e3759c28e7f4f..20c5a0a591d1e 100644 ---- a/pkg/logproto/types.go -+++ b/pkg/logproto/types.go -@@ -4,7 +4,6 @@ import ( - fmt ""fmt"" - io ""io"" - time ""time"" -- ""unsafe"" - ) - - // Stream contains a unique labels set as a string and a set of entries for it. 
-@@ -148,7 +147,7 @@ func (m *Stream) Unmarshal(dAtA []byte) error { - if postIndex > l { - return io.ErrUnexpectedEOF - } -- m.Labels = yoloString(dAtA[iNdEx:postIndex]) -+ m.Labels = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { -@@ -301,7 +300,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { - if postIndex > l { - return io.ErrUnexpectedEOF - } -- m.Line = yoloString(dAtA[iNdEx:postIndex]) -+ m.Line = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex -@@ -421,7 +420,3 @@ func (m *Entry) Equal(that interface{}) bool { - } - return true - } -- --func yoloString(buf []byte) string { -- return *((*string)(unsafe.Pointer(&buf))) --}",unknown,removes yolostring (#2078) -3f472330790204e4d09b7a4e087be3ff0dc04eff,2024-09-24 17:37:03,Cyril Tovena,fix(kafka): Fixes partition selection in distributors (#14242),False,"diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index 08fba483ec9bc..3ad586f3e596f 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -901,12 +901,11 @@ func (d *Distributor) sendStreamToKafka(ctx context.Context, stream KeyedStream, - if len(stream.Stream.Entries) == 0 { - return nil - } -- /* partitionID, err := d.partitionRing.PartitionRing().ActivePartitionForKey(stream.HashKey) -- if err != nil { -- d.kafkaAppends.WithLabelValues(""kafka"", ""fail"").Inc() -- return fmt.Errorf(""failed to find active partition for stream: %w"", err) -- }*/ -- partitionID := int32(0) -+ partitionID, err := d.partitionRing.PartitionRing().ActivePartitionForKey(stream.HashKey) -+ if err != nil { -+ d.kafkaAppends.WithLabelValues(""kafka"", ""fail"").Inc() -+ return fmt.Errorf(""failed to find active partition for stream: %w"", err) -+ } - - startTime := time.Now()",fix,Fixes partition selection in distributors (#14242) -202f40aebcad4c2f65f8cf4e656459b494381c82,2024-08-16 20:16:54,renovate[bot],"chore(deps): update dependabot/fetch-metadata action to v2 (#13908) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/.github/workflows/dependabot_reviewer.yml b/.github/workflows/dependabot_reviewer.yml -index ae25e2259dce8..4d19323a7b74b 100644 ---- a/.github/workflows/dependabot_reviewer.yml -+++ b/.github/workflows/dependabot_reviewer.yml -@@ -20,7 +20,7 @@ jobs: - - - name: Dependabot metadata - id: metadata -- uses: dependabot/fetch-metadata@v1.6.0 -+ uses: dependabot/fetch-metadata@v2.2.0 - with: - github-token: ""${{ secrets.GITHUB_TOKEN }}""",chore,"update dependabot/fetch-metadata action to v2 (#13908) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -9136ba32b4ee7f5d2d10c0e3a567f63d4d97c552,2025-02-16 22:39:17,renovate[bot],"fix(deps): update dependency @radix-ui/react-scroll-area to v1.2.3 (main) (#16308) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json -index bf7de91809abd..78cea544371de 100644 ---- a/pkg/ui/frontend/package-lock.json -+++ b/pkg/ui/frontend/package-lock.json -@@ -2583,9 +2583,9 @@ - } - }, - ""node_modules/@radix-ui/react-scroll-area"": { -- ""version"": ""1.2.2"", -- ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.2.tgz"", -- ""integrity"": ""sha512-EFI1N/S3YxZEW/lJ/H1jY3njlvTd8tBmgKEn4GHi51+aMm94i6NmAJstsm5cu3yJwYqYc93gpCPm21FeAbFk6g=="", -+ ""version"": ""1.2.3"", -+ ""resolved"": 
""https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.3.tgz"", -+ ""integrity"": ""sha512-l7+NNBfBYYJa9tNqVcP2AGvxdE3lmE6kFTBXdvHgUaZuy+4wGCL1Cl2AfaR7RKyimj7lZURGLwFO59k4eBnDJQ=="", - ""license"": ""MIT"", - ""dependencies"": { - ""@radix-ui/number"": ""1.1.0"", -@@ -2594,7 +2594,7 @@ - ""@radix-ui/react-context"": ""1.1.1"", - ""@radix-ui/react-direction"": ""1.1.0"", - ""@radix-ui/react-presence"": ""1.1.2"", -- ""@radix-ui/react-primitive"": ""2.0.1"", -+ ""@radix-ui/react-primitive"": ""2.0.2"", - ""@radix-ui/react-use-callback-ref"": ""1.1.0"", - ""@radix-ui/react-use-layout-effect"": ""1.1.0"" - }, -@@ -2613,6 +2613,47 @@ - } - } - }, -+ ""node_modules/@radix-ui/react-scroll-area/node_modules/@radix-ui/react-primitive"": { -+ ""version"": ""2.0.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.2.tgz"", -+ ""integrity"": ""sha512-Ec/0d38EIuvDF+GZjcMU/Ze6MxntVJYO/fRlCPhCaVUyPY9WTalHJw54tp9sXeJo3tlShWpy41vQRgLRGOuz+w=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-slot"": ""1.1.2"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-scroll-area/node_modules/@radix-ui/react-slot"": { -+ ""version"": ""1.1.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz"", -+ ""integrity"": ""sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-compose-refs"": ""1.1.1"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ } -+ } -+ }, - ""node_modules/@radix-ui/react-select"": { - ""version"": ""2.1.5"", - ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.1.5.tgz"",",fix,"update dependency @radix-ui/react-scroll-area to v1.2.3 (main) (#16308) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -e6cf42396f7554e46b6c331dd1938922806bcfc5,2024-11-01 19:49:15,George Robinson,fix: move partition_id into label to make PromQL easier (#14714),False,"diff --git a/pkg/kafka/partition/metrics.go b/pkg/kafka/partition/metrics.go -index 6cafb2da40f51..7979ea70a40c0 100644 ---- a/pkg/kafka/partition/metrics.go -+++ b/pkg/kafka/partition/metrics.go -@@ -2,6 +2,7 @@ package partition - - import ( - ""math"" -+ ""strconv"" - ""time"" - - ""github.com/prometheus/client_golang/prometheus"" -@@ -12,7 +13,7 @@ import ( - ) - - type readerMetrics struct { -- partition prometheus.Gauge -+ partition *prometheus.GaugeVec - phase *prometheus.GaugeVec - receiveDelay *prometheus.HistogramVec - recordsPerFetch prometheus.Histogram -@@ -26,10 +27,10 @@ type readerMetrics struct { - // newReaderMetrics initializes and returns a new set of metrics for the PartitionReader. 
- func newReaderMetrics(r prometheus.Registerer) readerMetrics { - return readerMetrics{ -- partition: promauto.With(r).NewGauge(prometheus.GaugeOpts{ -- Name: ""loki_ingest_storage_reader_partition_id"", -+ partition: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ -+ Name: ""loki_ingest_storage_reader_partition"", - Help: ""The partition ID assigned to this reader."", -- }), -+ }, []string{""id""}), - phase: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ - Name: ""loki_ingest_storage_reader_phase"", - Help: ""The current phase of the consumer."", -@@ -66,13 +67,13 @@ func newReaderMetrics(r prometheus.Registerer) readerMetrics { - } - - func (m *readerMetrics) reportStarting(partitionID int32) { -- m.partition.Set(float64(partitionID)) -+ m.partition.WithLabelValues(strconv.Itoa(int(partitionID))).Set(1) - m.phase.WithLabelValues(phaseStarting).Set(1) - m.phase.WithLabelValues(phaseRunning).Set(0) - } - - func (m *readerMetrics) reportRunning(partitionID int32) { -- m.partition.Set(float64(partitionID)) -+ m.partition.WithLabelValues(strconv.Itoa(int(partitionID))).Set(1) - m.phase.WithLabelValues(phaseStarting).Set(0) - m.phase.WithLabelValues(phaseRunning).Set(1) - }",fix,move partition_id into label to make PromQL easier (#14714) -7f498e96174171bca3a1aa179558e2013ec726ba,2022-03-09 11:36:55,Sandeep Sukhani,cleanup common index source files at the end while compacting index with boltdb-shipper (#5585),False,"diff --git a/pkg/storage/stores/shipper/compactor/table.go b/pkg/storage/stores/shipper/compactor/table.go -index c01ee6cfe8b9d..6289216459f07 100644 ---- a/pkg/storage/stores/shipper/compactor/table.go -+++ b/pkg/storage/stores/shipper/compactor/table.go -@@ -223,9 +223,22 @@ func (t *table) done() error { - } - } - -- for _, is := range t.indexSets { -- err := is.done() -- if err != nil { -+ for userID, is := range t.indexSets { -+ // indexSet.done() uploads the compacted db and cleans up the source index files. -+ // For user index sets, the files from common index sets are also a source of index. -+ // if we cleanup common index sets first, and we fail to upload newly compacted dbs in user index sets, then we will lose data. -+ // To avoid any data loss, we should call done() on common index sets at the end. -+ if userID == """" { -+ continue -+ } -+ -+ if err := is.done(); err != nil { -+ return err -+ } -+ } -+ -+ if commonIndexSet, ok := t.indexSets[""""]; ok { -+ if err := commonIndexSet.done(); err != nil { - return err - } - }",unknown,cleanup common index source files at the end while compacting index with boltdb-shipper (#5585) -177f37700a7d20df622acd73351e26307b7f29e8,2022-08-30 13:15:15,Christian Haudum,"Make TSDB store singleton resettable (#6950) - -This changes allows downstream projects to reset the TSDB store singleton. This must only be called in test cases where a new store instances cannot be explicitly created. - -Also add a `nil` check to the reset function for the BoltDB shipper singleton instance. - -Signed-off-by: Christian Haudum ",False,"diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go -index ca666fbd5c928..8157b0c714ca5 100644 ---- a/pkg/storage/factory.go -+++ b/pkg/storage/factory.go -@@ -40,6 +40,9 @@ var boltDBIndexClientWithShipper index.Client - // ResetBoltDBIndexClientWithShipper allows to reset the singleton. 
- // MUST ONLY BE USED IN TESTS - func ResetBoltDBIndexClientWithShipper() { -+ if boltDBIndexClientWithShipper == nil { -+ return -+ } - boltDBIndexClientWithShipper.Stop() - boltDBIndexClientWithShipper = nil - } -diff --git a/pkg/storage/stores/tsdb/store.go b/pkg/storage/stores/tsdb/store.go -index 65e6c158a6c69..737980af2d8ab 100644 ---- a/pkg/storage/stores/tsdb/store.go -+++ b/pkg/storage/stores/tsdb/store.go -@@ -26,6 +26,18 @@ type store struct { - stopOnce sync.Once - } - -+var storeInstance *store -+ -+// This must only be called in test cases where a new store instances -+// cannot be explicitly created. -+func ResetStoreInstance() { -+ if storeInstance == nil { -+ return -+ } -+ storeInstance.Stop() -+ storeInstance = nil -+} -+ - type newStoreFactoryFunc func( - indexShipperCfg indexshipper.Config, - p config.PeriodConfig, -@@ -50,7 +62,6 @@ type newStoreFactoryFunc func( - // If we do need to do schema specific handling, it would be a good idea to abstract away the handling since - // running multiple head managers would be complicated and wasteful. - var NewStore = func() newStoreFactoryFunc { -- var storeInstance *store - return func( - indexShipperCfg indexshipper.Config, - p config.PeriodConfig,",unknown,"Make TSDB store singleton resettable (#6950) - -This changes allows downstream projects to reset the TSDB store singleton. This must only be called in test cases where a new store instances cannot be explicitly created. - -Also add a `nil` check to the reset function for the BoltDB shipper singleton instance. - -Signed-off-by: Christian Haudum " -1161846e19105e2669a5b388998722c23bd0f2f4,2024-04-23 21:51:36,Travis Patterson,fix(docs): Move promtail configuration to the correct doc (#12737),False,"diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md -index ce1e329c7ea0d..7d210b7ec47a0 100644 ---- a/docs/sources/send-data/promtail/configuration.md -+++ b/docs/sources/send-data/promtail/configuration.md -@@ -43,6 +43,13 @@ For more detailed information on configuring how to discover and scrape logs fro - targets, see [Scraping]({{< relref ""./scraping"" >}}). For more information on transforming logs - from scraped targets, see [Pipelines]({{< relref ""./pipelines"" >}}). - -+## Reload at runtime -+ -+Promtail can reload its configuration at runtime. If the new configuration -+is not well-formed, the changes will not be applied. -+A configuration reload is triggered by sending a `SIGHUP` to the Promtail process or -+sending a HTTP POST request to the `/reload` endpoint (when the `--server.enable-runtime-reload` flag is enabled). -+ - ### Use environment variables in the configuration - - You can use environment variable references in the configuration file to set values that need to be configurable during deployment. -diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md -index 8c2413e26250b..be44ae74ca75b 100644 ---- a/docs/sources/shared/configuration.md -+++ b/docs/sources/shared/configuration.md -@@ -24,13 +24,6 @@ is especially useful in making sure your config files and flags are being read a - `-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so - that the order of configs reads correctly top to bottom when viewed in Grafana's Explore. - --## Reload at runtime -- --Promtail can reload its configuration at runtime. If the new configuration --is not well-formed, the changes will not be applied. 
--A configuration reload is triggered by sending a `SIGHUP` to the Promtail process or --sending a HTTP POST request to the `/reload` endpoint (when the `--server.enable-runtime-reload` flag is enabled). -- - ## Configuration file reference - - To specify which configuration file to load, pass the `-config.file` flag at the -diff --git a/docs/templates/configuration.template b/docs/templates/configuration.template -index d5a9f750559df..47945146b210e 100644 ---- a/docs/templates/configuration.template -+++ b/docs/templates/configuration.template -@@ -24,13 +24,6 @@ is especially useful in making sure your config files and flags are being read a - `-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so - that the order of configs reads correctly top to bottom when viewed in Grafana's Explore. - --## Reload at runtime -- --Promtail can reload its configuration at runtime. If the new configuration --is not well-formed, the changes will not be applied. --A configuration reload is triggered by sending a `SIGHUP` to the Promtail process or --sending a HTTP POST request to the `/reload` endpoint (when the `--server.enable-runtime-reload` flag is enabled). -- - ## Configuration file reference - - To specify which configuration file to load, pass the `-config.file` flag at the",fix,Move promtail configuration to the correct doc (#12737) -b4a926c406ef14603e3a52002d6cdd54abe192cd,2024-11-04 20:50:15,renovate[bot],"fix(deps): update module github.com/docker/docker to v27.3.1+incompatible (#14753) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index d73f8eb8f3dd7..3dc204744f551 100644 ---- a/go.mod -+++ b/go.mod -@@ -28,7 +28,7 @@ require ( - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cristalhq/hedgedhttp v0.9.1 - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc -- github.com/docker/docker v27.1.2+incompatible -+ github.com/docker/docker v27.3.1+incompatible - github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8 - github.com/drone/envsubst v1.0.3 - github.com/dustin/go-humanize v1.0.1 -diff --git a/go.sum b/go.sum -index 439c4b64975dd..cf1eb643b5c22 100644 ---- a/go.sum -+++ b/go.sum -@@ -1167,8 +1167,8 @@ github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s9 - github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= - github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= - github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= --github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY= --github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= - github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= - github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= - github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -diff --git a/vendor/github.com/docker/docker/api/common.go 
b/vendor/github.com/docker/docker/api/common.go -index f831735f840e8..93d64cd8d5ffe 100644 ---- a/vendor/github.com/docker/docker/api/common.go -+++ b/vendor/github.com/docker/docker/api/common.go -@@ -3,7 +3,7 @@ package api // import ""github.com/docker/docker/api"" - // Common constants for daemon and client. - const ( - // DefaultVersion of the current REST API. -- DefaultVersion = ""1.46"" -+ DefaultVersion = ""1.47"" - - // MinSupportedAPIVersion is the minimum API version that can be supported - // by the API server, specified as ""major.minor"". Note that the daemon -diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml -index 4a1b7087d8c01..7164e1eba53d5 100644 ---- a/vendor/github.com/docker/docker/api/swagger.yaml -+++ b/vendor/github.com/docker/docker/api/swagger.yaml -@@ -19,10 +19,10 @@ produces: - consumes: - - ""application/json"" - - ""text/plain"" --basePath: ""/v1.46"" -+basePath: ""/v1.47"" - info: - title: ""Docker Engine API"" -- version: ""1.46"" -+ version: ""1.47"" - x-logo: - url: ""https://docs.docker.com/assets/images/logo-docker-main.png"" - description: | -@@ -55,8 +55,8 @@ info: - the URL is not supported by the daemon, a HTTP `400 Bad Request` error message - is returned. - -- If you omit the version-prefix, the current version of the API (v1.46) is used. -- For example, calling `/info` is the same as calling `/v1.46/info`. Using the -+ If you omit the version-prefix, the current version of the API (v1.47) is used. -+ For example, calling `/info` is the same as calling `/v1.47/info`. Using the - API without a version-prefix is deprecated and will be removed in a future release. - - Engine releases in the near future should support this version of the API, -@@ -393,7 +393,7 @@ definitions: - Make the mount non-recursively read-only, but still leave the mount recursive - (unless NonRecursive is set to `true` in conjunction). - -- Addded in v1.44, before that version all read-only mounts were -+ Added in v1.44, before that version all read-only mounts were - non-recursive by default. To match the previous behaviour this - will default to `true` for clients on versions prior to v1.44. - type: ""boolean"" -@@ -1384,7 +1384,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always empty. It must not be used, and will be removed in API v1.47. -+ > always empty. It must not be used, and will be removed in API v1.48. - type: ""string"" - example: """" - Domainname: -@@ -1394,7 +1394,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always empty. It must not be used, and will be removed in API v1.47. -+ > always empty. It must not be used, and will be removed in API v1.48. - type: ""string"" - example: """" - User: -@@ -1408,7 +1408,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always false. It must not be used, and will be removed in API v1.47. -+ > always false. It must not be used, and will be removed in API v1.48. - type: ""boolean"" - default: false - example: false -@@ -1419,7 +1419,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always false. It must not be used, and will be removed in API v1.47. -+ > always false. It must not be used, and will be removed in API v1.48. - type: ""boolean"" - default: false - example: false -@@ -1430,7 +1430,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always false. It must not be used, and will be removed in API v1.47. -+ > always false. It must not be used, and will be removed in API v1.48. - type: ""boolean"" - default: false - example: false -@@ -1457,7 +1457,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always false. It must not be used, and will be removed in API v1.47. -+ > always false. It must not be used, and will be removed in API v1.48. - type: ""boolean"" - default: false - example: false -@@ -1468,7 +1468,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always false. It must not be used, and will be removed in API v1.47. -+ > always false. It must not be used, and will be removed in API v1.48. - type: ""boolean"" - default: false - example: false -@@ -1479,7 +1479,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always false. It must not be used, and will be removed in API v1.47. -+ > always false. It must not be used, and will be removed in API v1.48. - type: ""boolean"" - default: false - example: false -@@ -1516,7 +1516,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always empty. It must not be used, and will be removed in API v1.47. -+ > always empty. It must not be used, and will be removed in API v1.48. - type: ""string"" - default: """" - example: """" -@@ -1555,7 +1555,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always omitted. It must not be used, and will be removed in API v1.47. -+ > always omitted. It must not be used, and will be removed in API v1.48. - type: ""boolean"" - default: false - example: false -@@ -1567,7 +1567,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always omitted. It must not be used, and will be removed in API v1.47. -+ > always omitted. It must not be used, and will be removed in API v1.48. - type: ""string"" - default: """" - example: """" -@@ -1601,7 +1601,7 @@ definitions: -


- - > **Deprecated**: this field is not part of the image specification and is -- > always omitted. It must not be used, and will be removed in API v1.47. -+ > always omitted. It must not be used, and will be removed in API v1.48. - type: ""integer"" - default: 10 - x-nullable: true -@@ -2216,7 +2216,7 @@ definitions: - Created: - description: | - Date and time at which the image was created as a Unix timestamp -- (number of seconds sinds EPOCH). -+ (number of seconds since EPOCH). - type: ""integer"" - x-nullable: false - example: ""1644009612"" -@@ -2265,6 +2265,19 @@ definitions: - x-nullable: false - type: ""integer"" - example: 2 -+ Manifests: -+ description: | -+ Manifests is a list of manifests available in this image. -+ It provides a more detailed view of the platform-specific image manifests -+ or other image-attached data like build attestations. -+ -+ WARNING: This is experimental and may change at any time without any backward -+ compatibility. -+ type: ""array"" -+ x-nullable: false -+ x-omitempty: true -+ items: -+ $ref: ""#/definitions/ImageManifestSummary"" - - AuthConfig: - type: ""object"" -@@ -2500,7 +2513,7 @@ definitions: - example: false - Attachable: - description: | -- Wheter a global / swarm scope network is manually attachable by regular -+ Whether a global / swarm scope network is manually attachable by regular - containers from workers in swarm mode. - type: ""boolean"" - default: false -@@ -3723,7 +3736,7 @@ definitions: - example: ""json-file"" - Options: - description: | -- Driver-specific options for the selectd log driver, specified -+ Driver-specific options for the selected log driver, specified - as key/value pairs. - type: ""object"" - additionalProperties: -@@ -5318,7 +5331,7 @@ definitions: - description: | - The default (and highest) API version that is supported by the daemon - type: ""string"" -- example: ""1.46"" -+ example: ""1.47"" - MinAPIVersion: - description: | - The minimum API version that is supported by the daemon -@@ -5334,7 +5347,7 @@ definitions: - The version Go used to compile the daemon, and the version of the Go - runtime in use. - type: ""string"" -- example: ""go1.21.13"" -+ example: ""go1.22.7"" - Os: - description: | - The operating system that the daemon is running on (""linux"" or ""windows"") -@@ -6644,6 +6657,120 @@ definitions: - additionalProperties: - type: ""string"" - -+ ImageManifestSummary: -+ x-go-name: ""ManifestSummary"" -+ description: | -+ ImageManifestSummary represents a summary of an image manifest. -+ type: ""object"" -+ required: [""ID"", ""Descriptor"", ""Available"", ""Size"", ""Kind""] -+ properties: -+ ID: -+ description: | -+ ID is the content-addressable ID of an image and is the same as the -+ digest of the image manifest. -+ type: ""string"" -+ example: ""sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"" -+ Descriptor: -+ $ref: ""#/definitions/OCIDescriptor"" -+ Available: -+ description: Indicates whether all the child content (image config, layers) is fully available locally. -+ type: ""boolean"" -+ example: true -+ Size: -+ type: ""object"" -+ x-nullable: false -+ required: [""Content"", ""Total""] -+ properties: -+ Total: -+ type: ""integer"" -+ format: ""int64"" -+ example: 8213251 -+ description: | -+ Total is the total size (in bytes) of all the locally present -+ data (both distributable and non-distributable) that's related to -+ this manifest and its children. 
-+ This equal to the sum of [Content] size AND all the sizes in the -+ [Size] struct present in the Kind-specific data struct. -+ For example, for an image kind (Kind == ""image"") -+ this would include the size of the image content and unpacked -+ image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). -+ Content: -+ description: | -+ Content is the size (in bytes) of all the locally present -+ content in the content store (e.g. image config, layers) -+ referenced by this manifest and its children. -+ This only includes blobs in the content store. -+ type: ""integer"" -+ format: ""int64"" -+ example: 3987495 -+ Kind: -+ type: ""string"" -+ example: ""image"" -+ enum: -+ - ""image"" -+ - ""attestation"" -+ - ""unknown"" -+ description: | -+ The kind of the manifest. -+ -+ kind | description -+ -------------|----------------------------------------------------------- -+ image | Image manifest that can be used to start a container. -+ attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. -+ ImageData: -+ description: | -+ The image data for the image manifest. -+ This field is only populated when Kind is ""image"". -+ type: ""object"" -+ x-nullable: true -+ x-omitempty: true -+ required: [""Platform"", ""Containers"", ""Size"", ""UnpackedSize""] -+ properties: -+ Platform: -+ $ref: ""#/definitions/OCIPlatform"" -+ description: | -+ OCI platform of the image. This will be the platform specified in the -+ manifest descriptor from the index/manifest list. -+ If it's not available, it will be obtained from the image config. -+ Containers: -+ description: | -+ The IDs of the containers that are using this image. -+ type: ""array"" -+ items: -+ type: ""string"" -+ example: [""ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430"", ""abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e""] -+ Size: -+ type: ""object"" -+ x-nullable: false -+ required: [""Unpacked""] -+ properties: -+ Unpacked: -+ type: ""integer"" -+ format: ""int64"" -+ example: 3987495 -+ description: | -+ Unpacked is the size (in bytes) of the locally unpacked -+ (uncompressed) image content that's directly usable by the containers -+ running this image. -+ It's independent of the distributable content - e.g. -+ the image might still have an unpacked data that's still used by -+ some container even when the distributable/compressed content is -+ already gone. -+ AttestationData: -+ description: | -+ The image data for the attestation manifest. -+ This field is only populated when Kind is ""attestation"". -+ type: ""object"" -+ x-nullable: true -+ x-omitempty: true -+ required: [""For""] -+ properties: -+ For: -+ description: | -+ The digest of the image manifest that this attestation is for. 
-+ type: ""string"" -+ example: ""sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"" -+ - paths: - /containers/json: - get: -@@ -7585,7 +7712,7 @@ paths: - * Memory usage % = `(used_memory / available_memory) * 100.0` - * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` - * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` -- * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` -+ * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` - * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` - operationId: ""ContainerStats"" - produces: [""application/json""] -@@ -8622,6 +8749,11 @@ paths: - description: ""Show digest information as a `RepoDigests` field on each image."" - type: ""boolean"" - default: false -+ - name: ""manifests"" -+ in: ""query"" -+ description: ""Include `Manifests` in the image summary."" -+ type: ""boolean"" -+ default: false - tags: [""Image""] - /build: - post: -@@ -9094,12 +9226,23 @@ paths: - parameters: - - name: ""name"" - in: ""path"" -- description: ""Image name or ID."" -+ description: | -+ Name of the image to push. For example, `registry.example.com/myimage`. -+ The image must be present in the local image store with the same name. -+ -+ The name should be provided without tag; if a tag is provided, it -+ is ignored. For example, `registry.example.com/myimage:latest` is -+ considered equivalent to `registry.example.com/myimage`. -+ -+ Use the `tag` parameter to specify the tag to push. - type: ""string"" - required: true - - name: ""tag"" - in: ""query"" -- description: ""The tag to associate with the image on the registry."" -+ description: | -+ Tag of the image to push. For example, `latest`. If no tag is provided, -+ all tags of the given image that are present in the local image store -+ are pushed. 
- type: ""string"" - - name: ""X-Registry-Auth"" - in: ""header"" -diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go -index 727da8839cc27..03648fb7b5dcb 100644 ---- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go -+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go -@@ -1,6 +1,7 @@ - package container // import ""github.com/docker/docker/api/types/container"" - - import ( -+ ""errors"" - ""fmt"" - ""strings"" - -@@ -325,12 +326,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error { - if policy.MaximumRetryCount < 0 { - msg += "" and cannot be negative"" - } -- return &errInvalidParameter{fmt.Errorf(msg)} -+ return &errInvalidParameter{errors.New(msg)} - } - return nil - case RestartPolicyOnFailure: - if policy.MaximumRetryCount < 0 { -- return &errInvalidParameter{fmt.Errorf(""invalid restart policy: maximum retry count cannot be negative"")} -+ return &errInvalidParameter{errors.New(""invalid restart policy: maximum retry count cannot be negative"")} - } - return nil - case """": -diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go -index 0c39ab5f18b53..0914b2a4410c7 100644 ---- a/vendor/github.com/docker/docker/api/types/filters/parse.go -+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go -@@ -196,7 +196,7 @@ func (args Args) Match(field, source string) bool { - } - - // GetBoolOrDefault returns a boolean value of the key if the key is present --// and is intepretable as a boolean value. Otherwise the default value is returned. -+// and is interpretable as a boolean value. Otherwise the default value is returned. - // Error is not nil only if the filter values are not valid boolean or are conflicting. - func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { - fieldValues, ok := args.fields[key] -diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/docker/docker/api/types/image/manifest.go -new file mode 100644 -index 0000000000000..db8a00830e70d ---- /dev/null -+++ b/vendor/github.com/docker/docker/api/types/image/manifest.go -@@ -0,0 +1,99 @@ -+package image -+ -+import ( -+ ""github.com/opencontainers/go-digest"" -+ ocispec ""github.com/opencontainers/image-spec/specs-go/v1"" -+) -+ -+type ManifestKind string -+ -+const ( -+ ManifestKindImage ManifestKind = ""image"" -+ ManifestKindAttestation ManifestKind = ""attestation"" -+ ManifestKindUnknown ManifestKind = ""unknown"" -+) -+ -+type ManifestSummary struct { -+ // ID is the content-addressable ID of an image and is the same as the -+ // digest of the image manifest. -+ // -+ // Required: true -+ ID string `json:""ID""` -+ -+ // Descriptor is the OCI descriptor of the image. -+ // -+ // Required: true -+ Descriptor ocispec.Descriptor `json:""Descriptor""` -+ -+ // Indicates whether all the child content (image config, layers) is -+ // fully available locally -+ // -+ // Required: true -+ Available bool `json:""Available""` -+ -+ // Size is the size information of the content related to this manifest. -+ // Note: These sizes only take the locally available content into account. -+ // -+ // Required: true -+ Size struct { -+ // Content is the size (in bytes) of all the locally present -+ // content in the content store (e.g. image config, layers) -+ // referenced by this manifest and its children. 
-+ // This only includes blobs in the content store. -+ Content int64 `json:""Content""` -+ -+ // Total is the total size (in bytes) of all the locally present -+ // data (both distributable and non-distributable) that's related to -+ // this manifest and its children. -+ // This equal to the sum of [Content] size AND all the sizes in the -+ // [Size] struct present in the Kind-specific data struct. -+ // For example, for an image kind (Kind == ManifestKindImage), -+ // this would include the size of the image content and unpacked -+ // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). -+ Total int64 `json:""Total""` -+ } `json:""Size""` -+ -+ // Kind is the kind of the image manifest. -+ // -+ // Required: true -+ Kind ManifestKind `json:""Kind""` -+ -+ // Fields below are specific to the kind of the image manifest. -+ -+ // Present only if Kind == ManifestKindImage. -+ ImageData *ImageProperties `json:""ImageData,omitempty""` -+ -+ // Present only if Kind == ManifestKindAttestation. -+ AttestationData *AttestationProperties `json:""AttestationData,omitempty""` -+} -+ -+type ImageProperties struct { -+ // Platform is the OCI platform object describing the platform of the image. -+ // -+ // Required: true -+ Platform ocispec.Platform `json:""Platform""` -+ -+ Size struct { -+ // Unpacked is the size (in bytes) of the locally unpacked -+ // (uncompressed) image content that's directly usable by the containers -+ // running this image. -+ // It's independent of the distributable content - e.g. -+ // the image might still have an unpacked data that's still used by -+ // some container even when the distributable/compressed content is -+ // already gone. -+ // -+ // Required: true -+ Unpacked int64 `json:""Unpacked""` -+ } -+ -+ // Containers is an array containing the IDs of the containers that are -+ // using this image. -+ // -+ // Required: true -+ Containers []string `json:""Containers""` -+} -+ -+type AttestationProperties struct { -+ // For is the digest of the image manifest that this attestation is for. -+ For digest.Digest `json:""For""` -+} -diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go -index 8e32c9af86897..923ebe5a06a02 100644 ---- a/vendor/github.com/docker/docker/api/types/image/opts.go -+++ b/vendor/github.com/docker/docker/api/types/image/opts.go -@@ -76,6 +76,9 @@ type ListOptions struct { - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool -+ -+ // Manifests indicates whether the image manifests should be returned. -+ Manifests bool - } - - // RemoveOptions holds parameters to remove images. -diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go -index f1e3e2ef018f8..e87e216a28b35 100644 ---- a/vendor/github.com/docker/docker/api/types/image/summary.go -+++ b/vendor/github.com/docker/docker/api/types/image/summary.go -@@ -1,10 +1,5 @@ - package image - --// This file was generated by the swagger tool. --// Editing this file might prove futile when you re-run the swagger generate command -- --// Summary summary --// swagger:model Summary - type Summary struct { - - // Number of containers using this image. Includes both stopped and running -@@ -17,7 +12,7 @@ type Summary struct { - Containers int64 `json:""Containers""` - - // Date and time at which the image was created as a Unix timestamp -- // (number of seconds sinds EPOCH). 
-+ // (number of seconds since EPOCH). - // - // Required: true - Created int64 `json:""Created""` -@@ -47,6 +42,14 @@ type Summary struct { - // Required: true - ParentID string `json:""ParentId""` - -+ // Manifests is a list of image manifests available in this image. It -+ // provides a more detailed view of the platform-specific image manifests or -+ // other image-attached data like build attestations. -+ // -+ // WARNING: This is experimental and may change at any time without any backward -+ // compatibility. -+ Manifests []ManifestSummary `json:""Manifests,omitempty""` -+ - // List of content-addressable digests of locally available image manifests - // that the image is referenced from. Multiple manifests can refer to the - // same image. -diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go -index 97a924e37477f..8e383f6e60cb3 100644 ---- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go -+++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go -@@ -34,10 +34,9 @@ type AuthConfig struct { - } - - // EncodeAuthConfig serializes the auth configuration as a base64url encoded --// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header. -+// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. - // --// For details on base64url encoding, see: --// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 -+// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 - func EncodeAuthConfig(authConfig AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { -@@ -46,15 +45,14 @@ func EncodeAuthConfig(authConfig AuthConfig) (string, error) { - return base64.URLEncoding.EncodeToString(buf), nil - } - --// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON -+// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON - // authentication information as sent through the X-Registry-Auth header. - // --// This function always returns an AuthConfig, even if an error occurs. It is up -+// This function always returns an [AuthConfig], even if an error occurs. It is up - // to the caller to decide if authentication is required, and if the error can - // be ignored. - // --// For details on base64url encoding, see: --// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 -+// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 - func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { - if authEncoded == """" { - return &AuthConfig{}, nil -@@ -69,7 +67,7 @@ func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { - // clients and API versions. Current clients and API versions expect authentication - // to be provided through the X-Registry-Auth header. - // --// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an -+// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an - // error occurs. It is up to the caller to decide if authentication is required, - // and if the error can be ignored. 
- func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { -diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go -index 3eae4b9b297d2..1b4be6fffbab6 100644 ---- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go -+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go -@@ -122,7 +122,7 @@ type CAConfig struct { - SigningCAKey string `json:"",omitempty""` - - // If this value changes, and there is no specified signing cert and key, -- // then the swarm is forced to generate a new root certificate ane key. -+ // then the swarm is forced to generate a new root certificate and key. - ForceRotate uint64 `json:"",omitempty""` - } - -diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go -index bbd9ff0b8f977..618a4816209a7 100644 ---- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go -+++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go -@@ -414,7 +414,7 @@ type Info struct { - // the Volume has not been successfully created yet. - VolumeID string `json:"",omitempty""` - -- // AccessibleTopolgoy is the topology this volume is actually accessible -+ // AccessibleTopology is the topology this volume is actually accessible - // from. - AccessibleTopology []Topology `json:"",omitempty""` - } -diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go -index a9cc1e21e5dd4..bef679431dce5 100644 ---- a/vendor/github.com/docker/docker/client/image_list.go -+++ b/vendor/github.com/docker/docker/client/image_list.go -@@ -11,6 +11,11 @@ import ( - ) - - // ImageList returns a list of images in the docker host. -+// -+// Experimental: Setting the [options.Manifest] will populate -+// [image.Summary.Manifests] with information about image manifests. -+// This is experimental and might change in the future without any backward -+// compatibility. - func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { - var images []image.Summary - -@@ -47,6 +52,9 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([] - if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, ""1.42"") { - query.Set(""shared-size"", ""1"") - } -+ if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, ""1.47"") { -+ query.Set(""manifests"", ""1"") -+ } - - serverResp, err := cli.get(ctx, ""/images/json"", query, nil) - defer ensureReaderClosed(serverResp) -diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go -index 61490c8d1a209..17704366dc349 100644 ---- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go -+++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go -@@ -93,7 +93,7 @@ type Decoder interface { - // Reset resets the decoder - // Reset is called for certain events, such as log rotations - Reset(io.Reader) -- // Decode decodes the next log messeage from the stream -+ // Decode decodes the next log message from the stream - Decode() (*logger.Message, error) - // Close signals to the decoder that it can release whatever resources it was using. 
- Close() -diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go -index 7a8c6aebd627b..1951ca88bdeca 100644 ---- a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go -+++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go -@@ -12,7 +12,7 @@ import ( - ) - - func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { -- // Make sure to also open with read (in addition to write) to avoid borken pipe errors on plugin failure. -+ // Make sure to also open with read (in addition to write) to avoid broken pipe errors on plugin failure. - // It is up to the plugin to keep track of pipes that it should re-attach to, however. - // If the plugin doesn't open for reads, then the container will block once the pipe is full. - f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_RDWR|unix.O_CREAT|unix.O_NONBLOCK, 0o700) -diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go -index 035160c834e4d..8d2c8857fb03a 100644 ---- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go -+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go -@@ -290,7 +290,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, - } - - // Stream is an io.Writer for output with utilities to get the output's file --// descriptor and to detect wether it's a terminal. -+// descriptor and to detect whether it's a terminal. - // - // it is subset of the streams.Out type in - // https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out -diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go -index 2efd8508bfe7e..96c8e2b7fdf2c 100644 ---- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go -+++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go -@@ -236,7 +236,6 @@ func loadWithRetry(name string, retry bool) (*Plugin, error) { - storage.Unlock() - - err = pl.activate() -- - if err != nil { - storage.Lock() - delete(storage.plugins, name) -diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go -index 3792c67a9e454..3ea3012b188ba 100644 ---- a/vendor/github.com/docker/docker/pkg/pools/pools.go -+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go -@@ -124,7 +124,7 @@ func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - } - - // NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back --// into the pool and closes the writer if it's an io.Writecloser. -+// into the pool and closes the writer if it's an io.WriteCloser. 
- func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 145781690bb96..f5a58ec64203c 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -599,7 +599,7 @@ github.com/distribution/reference - ## explicit - github.com/dlclark/regexp2 - github.com/dlclark/regexp2/syntax --# github.com/docker/docker v27.1.2+incompatible -+# github.com/docker/docker v27.3.1+incompatible - ## explicit - github.com/docker/docker/api - github.com/docker/docker/api/types",fix,"update module github.com/docker/docker to v27.3.1+incompatible (#14753) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -46149500647cd53c0a5f2c5d9b62da8382f5962c,2023-12-12 22:30:07,J Stickler,"Update Getting Started topic and graphics (#11350) - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Final PR to complete fixes #9228 - -**Special notes for your reviewer**: -Updates Getting Started topic to be a general overview, links to Grafana -topics, and Agent configuration file. -Adds new Quick Start topic for running Loki Docker compose file locally -that includes working sample queries. -Adds three new graphics -Removes two unused graphics",False,"diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md -index c01a2cbc7a35b..b433b62f8f73c 100644 ---- a/docs/sources/get-started/_index.md -+++ b/docs/sources/get-started/_index.md -@@ -1,138 +1,114 @@ - --- --title: Get started -+title: Get started with Grafana Loki -+menuTitle: Get started - weight: 200 --description: How to create and use a simple Loki cluster for testing and evaluation purposes. --aliases: -- - ./getting-started -+description: Overview of the steps for getting started using Loki to collect logs. - --- - --# Get started -+# Get started with Grafana Loki -+ -+Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus. It is designed to be very cost effective and easy to operate. It does not index the contents of the logs, but rather a set of labels for each log stream. -+ -+Because all Loki implementations are unique, the installation process is -+different for every customer. But there are some steps in the process that -+should be common to every installation. -+ -+To collect logs and view your log data generally involves the following steps: -+ -+![Loki implementation steps](loki-install.png) -+ -+1. Install Loki on Kubernetes in simple scalable mode, using the recommended [Helm chart](https://grafana.com/docs/loki/latest/setup/install/helm/install-scalable/). Supply the Helm chart with your object storage authentication details. -+ - [Storage options](https://grafana.com/docs/loki/latest/operations/storage/) -+ - [Configuration reference](https://grafana.com/docs/loki/latest/configure/) -+ - There are [examples](https://grafana.com/docs/loki/latest/configure/examples/) for specific Object Storage providers that you can modify. -+1. Deploy the [Grafana Agent](https://grafana.com/docs/agent/latest/flow/) to collect logs from your applications. -+ 1. On Kubernetes, deploy the Grafana Agent using the Helm chart. Configure Grafana Agent to scrape logs from your Kubernetes cluster, and add your Loki endpoint details. See the following section for an example Grafana Agent Flow configuration file. -+ 1. 
Add [labels](https://grafana.com/docs/loki/latest/get-started/labels/) to your logs following our [best practices](https://grafana.com/docs/loki/latest/get-started/labels/bp-labels/). Most Loki users start by adding labels which describe where the logs are coming from (region, cluster, environment, etc.). -+1. Deploy [Grafana](https://grafana.com/docs/grafana/latest/setup-grafana/) or [Grafana Cloud](https://grafana.com/docs/grafana-cloud/quickstart/) and configure a [Loki datasource](https://grafana.com/docs/grafana/latest/datasources/loki/configure-loki-data-source/). -+1. Select the [Explore feature](https://grafana.com/docs/grafana/latest/explore/) in the Grafana main menu. To [view logs in Explore](https://grafana.com/docs/grafana/latest/explore/logs-integration/): -+ 1. Pick a time range. -+ 1. Choose the Loki datasource. -+ 1. Use [LogQL](https://grafana.com/docs/loki/latest/query/) in the [query editor](https://grafana.com/docs/grafana/latest/datasources/loki/query-editor/), use the Builder view to explore your labels, or select from sample pre-configured queries using the **Kick start your query** button. -+ -+**Next steps:** Learn more about Loki’s query language, [LogQL](https://grafana.com/docs/loki/latest/query/). -+ -+ -+## Example Grafana Agent configuration file to ship Kubernetes Pod logs to Loki -+ -+To deploy Grafana Agent to collect Pod logs from your Kubernetes cluster and ship them to Loki, you an use the Grafana Agent Helm chart, and a `values.yaml` file. -+ -+1. Install Loki with the [Helm chart](https://grafana.com/docs/loki/latest/setup/install/helm/install-scalable/). -+1. Deploy the Grafana Agent, using the [Grafana Agent Helm chart](https://grafana.com/docs/agent/latest/flow/setup/install/kubernetes/) and this example `values.yaml` file updating the value for `forward_to = [loki.write.endpoint.receiver]`: -+ -+ -+```yaml -+agent: -+ mounts: -+ varlog: true -+ configMap: -+ content: | -+ logging { -+ level = ""info"" -+ format = ""logfmt"" -+ } -+ -+ discovery.kubernetes ""k8s"" { -+ role = ""pod"" -+ } -+ -+ discovery.relabel ""k8s"" { -+ targets = discovery.kubernetes.k8s.targets -+ -+ rule { -+ source_labels = [""__meta_kubernetes_pod_name""] -+ action = ""replace"" -+ target_label = ""pod"" -+ } -+ rule { -+ source_labels = [""__meta_kubernetes_pod_container_name""] -+ action = ""replace"" -+ target_label = ""container"" -+ } -+ -+ rule { -+ source_labels = [""__meta_kubernetes_namespace"", ""__meta_kubernetes_pod_label_name""] -+ target_label = ""job"" -+ separator = ""/"" -+ } -+ -+ rule { -+ source_labels = [""__meta_kubernetes_pod_uid"", ""__meta_kubernetes_pod_container_name""] -+ target_label = ""__path__"" -+ separator = ""/"" -+ replacement = ""/var/log/pods/*$1/*.log"" -+ } -+ } -+ -+ local.file_match ""pods"" { -+ path_targets = discovery.relabel.k8s.output -+ } -+ -+ loki.source.file ""pods"" { -+ targets = local.file_match.pods.targets -+ forward_to = [loki.write.endpoint.receiver] -+ } -+ -+ loki.write ""endpoint"" { -+ endpoint { -+ url = ""http://loki-gateway:80/loki/api/v1/push"" -+ tenant_id = ""cloud"" -+ } -+ } - --This guide assists the reader to create and use a simple Loki cluster. --The cluster is intended for testing, development, and evaluation; --it will not meet most production requirements. -- --The test environment runs the [flog](https://github.com/mingrammer/flog) app to generate log lines. --Promtail is the test environment's agent (or client) that captures the log lines and pushes them to the Loki cluster through a gateway. 
--In a typical environment, the log-generating app and the agent run together, but in locations distinct from the Loki cluster. This guide runs each piece of the test environment locally, in Docker containers. -- --Grafana provides a way to pose queries against the logs stored in Loki and visualize query results. -- --![Simple scalable deployment test environment](simple-scalable-test-environment.png) -- --The test environment uses Docker compose to instantiate these parts, each in its own container: -- --- One [single scalable deployment]({{< relref ""../get-started/deployment-modes"" >}}) mode **Loki** instance has: -- - One Loki read component -- - One Loki write component -- - **Minio** is Loki's storage back end in the test environment. --- The **gateway** receives requests and redirects them to the appropriate container based on the request's URL. --- **Flog** generates log lines. --- **Promtail** scrapes the log lines from flog, and pushes them to Loki through the gateway. --- **Grafana** provides visualization of the log lines captured within Loki. -- --## Prerequisites -- --- [Docker](https://docs.docker.com/install) --- [Docker Compose](https://docs.docker.com/compose/install) -- --## Obtain the test environment -- --1. Create a directory called `evaluate-loki` for the test environment. Make `evaluate-loki` your current working directory: -- ```bash -- mkdir evaluate-loki -- cd evaluate-loki -- ``` --1. Download `loki-config.yaml`, `promtail-local-config.yaml`, and `docker-compose.yaml`: -- -- ```bash -- wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/loki-config.yaml -O loki-config.yaml -- wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/promtail-local-config.yaml -O promtail-local-config.yaml -- wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/docker-compose.yaml -O docker-compose.yaml -- ``` -- --## Deploy the test environment -- --1. With `evaluate-loki` as the current working directory, deploy the test environment using `docker-compose`: -- ```bash -- docker-compose up -d -- ``` --1. (Optional) Verify that the Loki cluster is up and running. The read component returns `ready` when you point a web browser at http://localhost:3101/ready. The message `Query Frontend not ready: not ready: number of schedulers this worker is connected to is 0` will show prior to the read component being ready. --The write component returns `ready` when you point a web browser at http://localhost:3102/ready. The message `Ingester not ready: waiting for 15s after being ready` will show prior to the write component being ready. -- --## Use Grafana and the test environment -- --Use [Grafana](/docs/grafana/latest/) to query and observe the log lines captured in the Loki cluster by navigating a browser to http://localhost:3000. --The Grafana instance has Loki configured as a [data source](/docs/grafana/latest/datasources/loki/). -- --Click on the Grafana instance's [Explore](/docs/grafana/latest/explore/) icon to bring up the explore pane. -- --Use the Explore dropdown menu to choose the Loki data source and bring up the Loki query browser. -- --Try some queries. --Enter your query into the **Log browser** box, and click on the blue **Run query** button. -- --To see all the log lines that flog has generated: - ``` --{container=""evaluate-loki-flog-1""} --``` -- --The flog app will generate log lines for invented HTTP requests. 
--To see all `GET` log lines, enter the query: -- --``` --{container=""evaluate-loki-flog-1""} |= ""GET"" --``` --For `POST` methods: --``` --{container=""evaluate-loki-flog-1""} |= ""POST"" --``` -- --To see every log line with a 401 status (unauthorized error): --``` --{container=""evaluate-loki-flog-1""} | json | status=""401"" --``` --To see every log line other than those that contain the value 401: --``` --{container=""evaluate-loki-flog-1""} != ""401"" --``` -- --Refer to [query examples]({{< relref ""../query/query_examples"" >}}) for more examples. -- --## Stop and clean up the test environment -+ - --To break down the test environment: -+1. Then install Grafana Agent in your Kubernetes cluster using: - --- Close the Grafana browser window -- --- With `evaluate-loki` as the current working directory, stop and remove all the Docker containers: - ```bash -- docker-compose down -+ helm upgrade -f values.yaml agent grafana/grafana-agent - ``` -- --## Modifying the flog app output -- --You can modify the flog app's log line generation by changing --its configuration. --Choose one of these two ways to apply a new configuration: -- --- To remove already-generated logs, restart the test environment with a new configuration. -- -- 1. With `evaluate-loki` as the current working directory, stop and clean up an existing test environment: -- ``` -- docker-compose down -- ``` -- 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output. Refer to the `flog` [command line arguments](https://hub.docker.com/r/mingrammer/flog). -- 1. With `evaluate-loki` as the current working directory, instantiate the new test environment: -- ``` -- docker-compose up -d -- ``` -- --- To keep already-generated logs in the running test environment, restart flog with a new configuration. -- -- 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output. -- 1. With `evaluate-loki` as the current working directory, restart only the flog app within the currently-running test environment: -- ``` -- docker-compose up -d --force-recreate flog -- ``` -- -+This sample file is configured to: -+- Install Grafana Agent to discover Pod logs. -+- Add `container` and `pod` labels to the logs. -+- Push the logs to your Loki cluster using the tenant ID `cloud`. 
-diff --git a/docs/sources/get-started/get-started-flog.png b/docs/sources/get-started/get-started-flog.png -new file mode 100644 -index 0000000000000..9b07645aa0354 -Binary files /dev/null and b/docs/sources/get-started/get-started-flog.png differ -diff --git a/docs/sources/get-started/grafana-query-builder.png b/docs/sources/get-started/grafana-query-builder.png -new file mode 100644 -index 0000000000000..59535a80c4b3f -Binary files /dev/null and b/docs/sources/get-started/grafana-query-builder.png differ -diff --git a/docs/sources/get-started/loki-install.png b/docs/sources/get-started/loki-install.png -new file mode 100644 -index 0000000000000..74a416de0c5c3 -Binary files /dev/null and b/docs/sources/get-started/loki-install.png differ -diff --git a/docs/sources/get-started/loki-overview-1.png b/docs/sources/get-started/loki-overview-1.png -deleted file mode 100644 -index a0c2cae725117..0000000000000 -Binary files a/docs/sources/get-started/loki-overview-1.png and /dev/null differ -diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md -new file mode 100644 -index 0000000000000..be9c2e1fe3e3a ---- /dev/null -+++ b/docs/sources/get-started/quick-start.md -@@ -0,0 +1,172 @@ -+--- -+title: Quick start to run Loki locally -+menuTitle: Loki quick start -+weight: 550 -+description: How to create and use a simple local Loki cluster for testing and evaluation purposes. -+--- -+ -+# Quick start to run Loki locally -+ -+If you want to experiment with Loki, you can run Loki locally using the Docker Compose file that ships with Loki. It runs Loki in a [scalable monolithic deployment](https://grafana.com/docs/loki/latest/get-started/deployment-modes/#monolithic-mode) mode and includes a sample application to generate logs. -+ -+The Docker Compose configuration instantiates the following components, each in its own container: -+ -+- **Flog** a sample application which generates log lines. -+- **Promtail** which scrapes the log lines from Flog, and pushes them to Loki through the gateway. -+- **Gateway** (NGINX) which receives requests and redirects them to the appropriate container based on the request's URL. -+- One Loki **read** component. -+- One Loki **write** component. -+- **Minio** an S3-compatible object store which Loki uses to store its index and chunks. -+- **Grafana** which provides visualization of the log lines captured within Loki. -+ -+![Getting started sample application](get-started-flog.png) -+ -+## Installing Loki and collecting sample logs -+ -+Prerequisites -+- Docker -+- Docker Compose -+ -+{{% admonition type=""note"" %}} -+Note that this quick start assumes you are running Linux. -+{{% /admonition %}} -+ -+**Steps:** -+ -+1. Create a directory called `evaluate-loki` for the demo environment. Make `evaluate-loki` your current working directory: -+ -+ ```bash -+ mkdir evaluate-loki -+ cd evaluate-loki -+ ``` -+ -+1. Download `loki-config.yaml`, `promtail-local-config.yaml`, and `docker-compose.yaml`: -+ -+ ```bash -+ wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/loki-config.yaml -O loki-config.yaml -+ wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/promtail-local-config.yaml -O promtail-local-config.yaml -+ wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/docker-compose.yaml -O docker-compose.yaml -+ ``` -+ -+1. Deploy the sample Docker image. 
-+ -+ With `evaluate-loki` as the current working directory, start the demo environment using `docker compose`: -+ -+ ```bash -+ docker compose up -d -+ ``` -+ -+ You should see something similar to the following: -+ ```bash -+ ✔ Network evaluate-loki_loki Created 0.1s -+ ✔ Container evaluate-loki-minio-1 Started 0.6s -+ ✔ Container evaluate-loki-flog-1 Started 0.6s -+ ✔ Container evaluate-loki-write-1 Started 0.8s -+ ✔ Container evaluate-loki-read-1 Started 0.8s -+ ✔ Container evaluate-loki-gateway-1 Started 1.1s -+ ✔ Container evaluate-loki-grafana-1 Started 1.4s -+ ✔ Container evaluate-loki-promtail-1 Started 1.4s -+ ``` -+ -+1. (Optional) Verify that the Loki cluster is up and running. -+ - The read component returns `ready` when you point a web browser at [http://localhost:3101/ready](http://localhost:3101/ready). The message `Query Frontend not ready: not ready: number of schedulers this worker is connected to is 0` will show prior to the read component being ready. -+ - The write component returns `ready` when you point a web browser at [http://localhost:3102/ready](http://localhost:3102/ready). The message `Ingester not ready: waiting for 15s after being ready` will show prior to the write component being ready. -+ -+## Viewing your logs in Grafana -+ -+Once you have collected logs, you will want to view them. You can view your logs using the command line interface, [LogCLI](/docs/loki/latest/query/logcli/), but the easiest way to view your logs is with Grafana. -+ -+1. Use Grafana to query the Loki data source. -+ -+ The test environment includes [Grafana](https://grafana.com/docs/grafana/latest/), which you can use to query and observe the sample logs generated by the flog application. You can access the Grafana cluster by navigating to [http://localhost:3000](http://localhost:3000). The Grafana instance provided with this demo has a Loki [datasource](https://grafana.com/docs/grafana/latest/datasources/loki/) already configured. -+ -+![Grafana Explore](grafana-query-builder.png) -+ -+1. From the Grafana main menu, click the **Explore** icon (1) to launch the Explore tab. To learn more about Explore, refer the [Explore](https://grafana.com/docs/grafana/latest/explore/) documentation. -+ -+1. From the menu in the dashboard header (2), select the Loki data source. This displays the Loki query editor. In the query editor you use the Loki query language, [LogQL](https://grafana.com/docs/loki/latest/query/), to query your logs. -+ To learn more about the query editor, refer to the [query editor documentation](https://grafana.com/docs/grafana/latest/datasources/loki/query-editor/). -+ -+1. The Loki query editor has two modes (3): -+ -+ - [Builder mode](https://grafana.com/docs/grafana/latest/datasources/loki/query-editor/#builder-mode), which provides a visual query designer. -+ - [Code mode](https://grafana.com/docs/grafana/latest/datasources/loki/query-editor/#code-mode), which provides a feature-rich editor for writing LogQL queries. -+ -+ Next we’ll walk through a few simple queries using both the builder and code views. -+ -+1. Click **Code** (3) to work in Code mode in the query editor. -+ -+ Here are some basic sample queries to get you started using LogQL. Note that these queries assume that you followed the instructions to create a directory called `evaluate-loki`. If you installed in a different directory, you’ll need to modify these queries to match your installation directory. After copying any of these queries into the query editor, click **Run Query** to execute the query. 
-+ -+ 1. View all the log lines which have the container label ""flog"": -+ ```bash -+ {container=""evaluate-loki-flog-1""} -+ ``` -+ In Loki, this is called a log stream. Loki uses [labels](https://grafana.com/docs/loki/latest/get-started/labels/) as metadata to describe log streams. Loki queries always start with a label selector. In the query above, the label selector is `container`. -+ -+ 1. To view all the log lines which have the container label ""grafana"": -+ ```bash -+ {container=""evaluate-loki-grafana-1""} -+ ``` -+ -+ 1. Find all the log lines in the container=flog stream that contain the string ""status"": -+ ```bash -+ {container=""evaluate-loki-flog-1""} |= `status` -+ ``` -+ -+ 1. Find all the log lines in the container=flog stream where the JSON field ""status"" is ""404"": -+ ```bash -+ {container=""evaluate-loki-flog-1""} | json | status=`404` -+ ``` -+ -+ 1. Calculate the number of logs per second where the JSON field ""status"" is ""404"": -+ ```bash -+ sum by(container) (rate({container=""evaluate-loki-flog-1""} | json | status=`404` [$__auto])) -+ ``` -+ The final query above is a metric query which returns a time series. This will trigger Grafana to draw a graph of the results. You can change the type of graph for a different view of the data. Click **Bars** to view a bar graph of the data. -+ -+1. Click the **Builder** tab (3) to return to Builder mode in the query editor. -+ 1. In Builder view, click **Kick start your query**(4). -+ 1. Expand the **Log query starters** section. -+ 1. Select the first choice, **Parse log lines with logfmt parser**, by clicking **Use this query**. -+ 1. On the Explore tab, select **container** from the **Label filters** menu then select a container from the **value** menu. -+ 1. Click **Run Query**(6). -+ -+For a thorough introduction to LogQL, refer to the [LogQL reference](https://grafana.com/docs/loki/latest/query/). -+ -+## Sample queries (code view) -+Here are some more sample queries that you can run using the Flog sample data. -+ -+To see all the log lines that flog has generated, enter the LogQL query: -+```bash -+{container=""evaluate-loki-flog-1""}|= `` -+``` -+The flog app generates log lines for simulated HTTP requests. -+ -+To see all `GET` log lines, enter the LogQL query: -+```bash -+{container=""evaluate-loki-flog-1""} |= ""GET"" -+``` -+ -+To see all `POST` methods, enter the LogQL query: -+```bash -+{container=""evaluate-loki-flog-1""} |= ""POST"" -+``` -+ -+To see every log line with a 401 status (unauthorized error), enter the LogQL query: -+```bash -+{container=""evaluate-loki-flog-1""} | json | status=""401"" -+``` -+ -+To see every log line that does not contain the value 401: -+```bash -+{container=""evaluate-loki-flog-1""} != ""401"" -+``` -+ -+For more examples, refer to the [query documentation](https://grafana.com/docs/loki/latest/query/query_examples/). -+ -+## Complete metrics, logs, traces, and profiling example -+ -+If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mlt). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. -+ -+The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud. 
-diff --git a/docs/sources/get-started/simple-scalable-test-environment.png b/docs/sources/get-started/simple-scalable-test-environment.png -deleted file mode 100644 -index 950cb70c30996..0000000000000 -Binary files a/docs/sources/get-started/simple-scalable-test-environment.png and /dev/null differ -diff --git a/examples/getting-started/docker-compose.yaml b/examples/getting-started/docker-compose.yaml -index 01b765ccd1a17..83dcde94d273e 100644 ---- a/examples/getting-started/docker-compose.yaml -+++ b/examples/getting-started/docker-compose.yaml -@@ -189,6 +189,6 @@ services: - - flog: - image: mingrammer/flog -- command: -f json -d 1s -l -+ command: -f json -d 200ms -l - networks: - - loki",unknown,"Update Getting Started topic and graphics (#11350) - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Final PR to complete fixes #9228 - -**Special notes for your reviewer**: -Updates Getting Started topic to be a general overview, links to Grafana -topics, and Agent configuration file. -Adds new Quick Start topic for running Loki Docker compose file locally -that includes working sample queries. -Adds three new graphics -Removes two unused graphics" -b7359c5d5379f0fc3ad958a752cca53db8449ab6,2023-06-16 17:18:56,Salva Corts,"Revert ""Add summary stats and metrics for stats cache (#9536)"" (#9721) - -This reverts commit af287ac3eba46e04ad1d5cf8051262ea0f2a25de. - -There is a bug in this PR that inflates the stats returned for the query -since we reuse the stats ctx in the query execution engine.",False,"diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go -index 470f68a745d75..14abf943793d2 100644 ---- a/pkg/logql/engine.go -+++ b/pkg/logql/engine.go -@@ -241,7 +241,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) { - - // records query statistics - start := time.Now() -- statsCtx, ctx := stats.GetOrCreateContext(ctx) -+ statsCtx, ctx := stats.NewContext(ctx) - metadataCtx, ctx := metadata.NewContext(ctx) - - data, err := q.Eval(ctx) -diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go -index 51f1ff9736f1f..d0e9f7b5c6778 100644 ---- a/pkg/logql/metrics.go -+++ b/pkg/logql/metrics.go -@@ -142,9 +142,6 @@ func RecordRangeAndInstantQueryMetrics( - ""cache_index_req"", stats.Caches.Index.EntriesRequested, - ""cache_index_hit"", stats.Caches.Index.EntriesFound, - ""cache_index_download_time"", stats.Caches.Index.CacheDownloadTime(), -- ""cache_stats_results_req"", stats.Caches.StatsResult.EntriesRequested, -- ""cache_stats_results_hit"", stats.Caches.StatsResult.EntriesFound, -- ""cache_stats_results_download_time"", stats.Caches.StatsResult.CacheDownloadTime(), - ""cache_result_req"", stats.Caches.Result.EntriesRequested, - ""cache_result_hit"", stats.Caches.Result.EntriesFound, - ""cache_result_download_time"", stats.Caches.Result.CacheDownloadTime(), -diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go -index 9780536aa04d6..f5db410a54008 100644 ---- a/pkg/logqlmodel/stats/context.go -+++ b/pkg/logqlmodel/stats/context.go -@@ -69,15 +69,6 @@ func NewContext(ctx context.Context) (*Context, context.Context) { - return contextData, ctx - } - --func GetOrCreateContext(ctx context.Context) (*Context, context.Context) { -- v, ok := ctx.Value(statsKey).(*Context) -- if !ok { -- return NewContext(ctx) -- } -- -- return v, ctx --} -- - // FromContext returns the statistics context. 
- func FromContext(ctx context.Context) *Context { - v, ok := ctx.Value(statsKey).(*Context) -@@ -207,7 +198,6 @@ func (c *Caches) Merge(m Caches) { - c.Chunk.Merge(m.Chunk) - c.Index.Merge(m.Index) - c.Result.Merge(m.Result) -- c.StatsResult.Merge(m.StatsResult) - } - - func (c *Cache) Merge(m Cache) { -@@ -401,8 +391,6 @@ func (c *Context) getCacheStatsByType(t CacheType) *Cache { - stats = &c.caches.Index - case ResultCache: - stats = &c.caches.Result -- case StatsResultCache: -- stats = &c.caches.StatsResult - default: - return nil - } -@@ -467,13 +455,6 @@ func (c Caches) Log(log log.Logger) { - ""Cache.Index.BytesSent"", humanize.Bytes(uint64(c.Index.BytesSent)), - ""Cache.Index.BytesReceived"", humanize.Bytes(uint64(c.Index.BytesReceived)), - ""Cache.Index.DownloadTime"", c.Index.CacheDownloadTime(), -- ""Cache.StatsResult.Requests"", c.StatsResult.Requests, -- ""Cache.StatsResult.EntriesRequested"", c.StatsResult.EntriesRequested, -- ""Cache.StatsResult.EntriesFound"", c.StatsResult.EntriesFound, -- ""Cache.StatsResult.EntriesStored"", c.StatsResult.EntriesStored, -- ""Cache.StatsResult.BytesSent"", humanize.Bytes(uint64(c.StatsResult.BytesSent)), -- ""Cache.StatsResult.BytesReceived"", humanize.Bytes(uint64(c.StatsResult.BytesReceived)), -- ""Cache.Result.DownloadTime"", c.Result.CacheDownloadTime(), - ""Cache.Result.Requests"", c.Result.Requests, - ""Cache.Result.EntriesRequested"", c.Result.EntriesRequested, - ""Cache.Result.EntriesFound"", c.Result.EntriesFound, -diff --git a/pkg/logqlmodel/stats/stats.pb.go b/pkg/logqlmodel/stats/stats.pb.go -index 40b5aafbd3eba..1a4fa5b6ebba6 100644 ---- a/pkg/logqlmodel/stats/stats.pb.go -+++ b/pkg/logqlmodel/stats/stats.pb.go -@@ -95,10 +95,9 @@ func (m *Result) GetCaches() Caches { - } - - type Caches struct { -- Chunk Cache `protobuf:""bytes,1,opt,name=chunk,proto3"" json:""chunk""` -- Index Cache `protobuf:""bytes,2,opt,name=index,proto3"" json:""index""` -- Result Cache `protobuf:""bytes,3,opt,name=result,proto3"" json:""result""` -- StatsResult Cache `protobuf:""bytes,4,opt,name=statsResult,proto3"" json:""statsResult""` -+ Chunk Cache `protobuf:""bytes,1,opt,name=chunk,proto3"" json:""chunk""` -+ Index Cache `protobuf:""bytes,2,opt,name=index,proto3"" json:""index""` -+ Result Cache `protobuf:""bytes,3,opt,name=result,proto3"" json:""result""` - } - - func (m *Caches) Reset() { *m = Caches{} } -@@ -154,13 +153,6 @@ func (m *Caches) GetResult() Cache { - return Cache{} - } - --func (m *Caches) GetStatsResult() Cache { -- if m != nil { -- return m.StatsResult -- } -- return Cache{} --} -- - // Summary is the summary of a query statistics. - type Summary struct { - // Total bytes processed per second. 
-@@ -676,69 +668,68 @@ func init() { - func init() { proto.RegisterFile(""pkg/logqlmodel/stats/stats.proto"", fileDescriptor_6cdfe5d2aea33ebb) } - - var fileDescriptor_6cdfe5d2aea33ebb = []byte{ -- // 979 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcb, 0x6f, 0xe3, 0x44, -- 0x18, 0x8f, 0x93, 0x75, 0xd2, 0xce, 0xf6, 0xb5, 0xd3, 0x2e, 0x6b, 0x40, 0xb2, 0x2b, 0x9f, 0x2a, -- 0x81, 0x1a, 0xf1, 0x90, 0x10, 0x88, 0x95, 0x90, 0xbb, 0xac, 0x54, 0x69, 0x11, 0xcb, 0x57, 0xb8, -- 0x70, 0x73, 0xec, 0xd9, 0xc4, 0xaa, 0x63, 0xa7, 0x7e, 0xc0, 0xee, 0x8d, 0x1b, 0x47, 0xf8, 0x33, -- 0xb8, 0xf0, 0x7f, 0xec, 0xb1, 0xc7, 0x3d, 0x59, 0x34, 0xbd, 0x80, 0x4f, 0x95, 0xb8, 0x23, 0x34, -- 0xdf, 0x4c, 0x6c, 0x8f, 0xe3, 0x48, 0x7b, 0x89, 0xe7, 0xfb, 0x3d, 0xe6, 0xfd, 0x7d, 0x13, 0x72, -- 0xbc, 0xb8, 0x9c, 0x8e, 0xc3, 0x78, 0x7a, 0x15, 0xce, 0x63, 0x9f, 0x85, 0xe3, 0x34, 0x73, 0xb3, -- 0x54, 0xfc, 0x9e, 0x2e, 0x92, 0x38, 0x8b, 0xa9, 0x8e, 0xc1, 0x7b, 0x47, 0xd3, 0x78, 0x1a, 0x23, -- 0x32, 0xe6, 0x2d, 0x41, 0xda, 0xff, 0x6a, 0x64, 0x08, 0x2c, 0xcd, 0xc3, 0x8c, 0x7e, 0x4e, 0x46, -- 0x69, 0x3e, 0x9f, 0xbb, 0xc9, 0x2b, 0x43, 0x3b, 0xd6, 0x4e, 0xee, 0x7f, 0xbc, 0x77, 0x2a, 0xba, -- 0xb9, 0x10, 0xa8, 0xb3, 0xff, 0xba, 0xb0, 0x7a, 0x65, 0x61, 0xad, 0x64, 0xb0, 0x6a, 0x70, 0xeb, -- 0x55, 0xce, 0x92, 0x80, 0x25, 0x46, 0x5f, 0xb1, 0x7e, 0x27, 0xd0, 0xda, 0x2a, 0x65, 0xb0, 0x6a, -- 0xd0, 0xc7, 0x64, 0x2b, 0x88, 0xa6, 0x2c, 0xcd, 0x58, 0x62, 0x0c, 0xd0, 0xbb, 0x2f, 0xbd, 0xe7, -- 0x12, 0x76, 0x0e, 0xa4, 0xb9, 0x12, 0x42, 0xd5, 0xa2, 0x9f, 0x92, 0xa1, 0xe7, 0x7a, 0x33, 0x96, -- 0x1a, 0xf7, 0xd0, 0xbc, 0x2b, 0xcd, 0x67, 0x08, 0x3a, 0xbb, 0xd2, 0xaa, 0xa3, 0x08, 0xa4, 0xd6, -- 0xfe, 0x47, 0x23, 0x43, 0xa1, 0xa0, 0x1f, 0x11, 0xdd, 0x9b, 0xe5, 0xd1, 0xa5, 0x5c, 0xf3, 0x4e, -- 0xd3, 0xdf, 0xb0, 0x73, 0x09, 0x88, 0x0f, 0xb7, 0x04, 0x91, 0xcf, 0x5e, 0xca, 0xb5, 0x6e, 0xb0, -- 0xa0, 0x04, 0xc4, 0x87, 0x4f, 0x33, 0xc1, 0x5d, 0x96, 0x6b, 0x54, 0x3d, 0x7b, 0xd2, 0x23, 0x35, -- 0x20, 0xbf, 0xf4, 0x8c, 0xdc, 0x47, 0x99, 0x38, 0x20, 0xb9, 0x42, 0xd5, 0x7a, 0x28, 0xad, 0x4d, -- 0x21, 0x34, 0x03, 0xbb, 0xbc, 0x47, 0x46, 0xf2, 0x04, 0xe9, 0x0f, 0xe4, 0xd1, 0xe4, 0x55, 0xc6, -- 0xd2, 0xe7, 0x49, 0xec, 0xb1, 0x34, 0x65, 0xfe, 0x73, 0x96, 0x5c, 0x30, 0x2f, 0x8e, 0x7c, 0x5c, -- 0xfe, 0xc0, 0x79, 0xbf, 0x2c, 0xac, 0x4d, 0x12, 0xd8, 0x44, 0xf0, 0x6e, 0xc3, 0x20, 0xea, 0xec, -- 0xb6, 0x5f, 0x77, 0xbb, 0x41, 0x02, 0x9b, 0x08, 0x7a, 0x4e, 0x0e, 0xb3, 0x38, 0x73, 0x43, 0x47, -- 0x19, 0x16, 0x77, 0x70, 0xe0, 0x3c, 0x2a, 0x0b, 0xab, 0x8b, 0x86, 0x2e, 0xb0, 0xea, 0xea, 0x99, -- 0x32, 0x14, 0xee, 0x68, 0xb3, 0x2b, 0x95, 0x86, 0x2e, 0x90, 0x9e, 0x90, 0x2d, 0xf6, 0x92, 0x79, -- 0xdf, 0x07, 0x73, 0x66, 0xe8, 0xc7, 0xda, 0x89, 0xe6, 0xec, 0xf0, 0xbb, 0xb9, 0xc2, 0xa0, 0x6a, -- 0xd1, 0x0f, 0xc8, 0xf6, 0x55, 0xce, 0x72, 0x86, 0xd2, 0x21, 0x4a, 0x77, 0xcb, 0xc2, 0xaa, 0x41, -- 0xa8, 0x9b, 0xf4, 0x94, 0x90, 0x34, 0x9f, 0x88, 0xac, 0x48, 0x8d, 0x11, 0x4e, 0x6c, 0xaf, 0x2c, -- 0xac, 0x06, 0x0a, 0x8d, 0x36, 0x7d, 0x46, 0x8e, 0x70, 0x76, 0x5f, 0x47, 0x19, 0x72, 0x2c, 0xcb, -- 0x93, 0x88, 0xf9, 0xc6, 0x16, 0x3a, 0x8d, 0xb2, 0xb0, 0x3a, 0x79, 0xe8, 0x44, 0xa9, 0x4d, 0x86, -- 0xe9, 0x22, 0x0c, 0xb2, 0xd4, 0xd8, 0x46, 0x3f, 0xe1, 0xb7, 0x51, 0x20, 0x20, 0xbf, 0xa8, 0x99, -- 0xb9, 0x89, 0x9f, 0x1a, 0xa4, 0xa1, 0x41, 0x04, 0xe4, 0xd7, 0xfe, 0x92, 0x8c, 0x64, 0xca, 0xf3, -- 0x2c, 0x49, 0xb3, 0x38, 0x61, 0xad, 0xc4, 0xba, 0xe0, 0x58, 0x9d, 0x25, 0x28, 0x01, 0xf1, 0xb1, -- 0xff, 0xec, 0x93, 0xad, 0xf3, 0x3a, 
0xb3, 0x77, 0x70, 0xaa, 0xc0, 0xf8, 0x45, 0x17, 0x17, 0x54, -- 0x77, 0x0e, 0xca, 0xc2, 0x52, 0x70, 0x50, 0x22, 0xfa, 0x94, 0x50, 0x8c, 0xcf, 0x78, 0xa6, 0xa6, -- 0xdf, 0xb8, 0x19, 0x7a, 0xc5, 0x2d, 0x7c, 0xa7, 0x2c, 0xac, 0x0e, 0x16, 0x3a, 0xb0, 0x6a, 0x74, -- 0x07, 0xe3, 0x54, 0x5e, 0xba, 0x7a, 0x74, 0x89, 0x83, 0x12, 0xd1, 0x2f, 0xc8, 0x5e, 0x7d, 0x65, -- 0x2e, 0x58, 0x94, 0xc9, 0x1b, 0x46, 0xcb, 0xc2, 0x6a, 0x31, 0xd0, 0x8a, 0xeb, 0xfd, 0xd2, 0xdf, -- 0x7a, 0xbf, 0x7e, 0xeb, 0x13, 0x1d, 0xf9, 0x6a, 0x60, 0xb1, 0x08, 0x60, 0x2f, 0x64, 0x3e, 0xd7, -- 0x03, 0x57, 0x0c, 0xb4, 0x62, 0xfa, 0x2d, 0x79, 0xd8, 0x40, 0x9e, 0xc4, 0x3f, 0x47, 0x61, 0xec, -- 0xfa, 0xd5, 0xae, 0xbd, 0x5b, 0x16, 0x56, 0xb7, 0x00, 0xba, 0x61, 0x7e, 0x06, 0x9e, 0x82, 0x61, -- 0x02, 0x0c, 0xea, 0x33, 0x58, 0x67, 0xa1, 0x03, 0xab, 0x4b, 0x73, 0xab, 0xf0, 0x71, 0xac, 0xbb, -- 0x34, 0xdb, 0xbf, 0x0e, 0x88, 0x8e, 0x3c, 0xdf, 0x91, 0x19, 0x73, 0x7d, 0x21, 0xe6, 0xc5, 0xa0, -- 0x79, 0x14, 0x2a, 0x03, 0xad, 0x58, 0xf1, 0xe2, 0x01, 0xe1, 0x99, 0xb4, 0xbd, 0xc8, 0x40, 0x2b, -- 0xa6, 0x67, 0xe4, 0x81, 0xcf, 0xbc, 0x78, 0xbe, 0x48, 0xb0, 0x5c, 0x88, 0xa1, 0x87, 0x68, 0x7f, -- 0x58, 0x16, 0xd6, 0x3a, 0x09, 0xeb, 0x50, 0xbb, 0x13, 0x31, 0x87, 0x51, 0x77, 0x27, 0x62, 0x1a, -- 0xeb, 0x10, 0x7d, 0x4c, 0xf6, 0xdb, 0xf3, 0x10, 0xc5, 0xe1, 0xb0, 0x2c, 0xac, 0x36, 0x05, 0x6d, -- 0x80, 0xdb, 0xf1, 0x78, 0x9f, 0xe4, 0x8b, 0x30, 0xf0, 0x5c, 0x6e, 0xdf, 0xae, 0xed, 0x2d, 0x0a, -- 0xda, 0x80, 0xfd, 0x5f, 0x9f, 0xe8, 0xf8, 0x44, 0xf1, 0x54, 0x62, 0xa2, 0xdc, 0x3c, 0x8d, 0xf3, -- 0x48, 0x49, 0xe4, 0x26, 0x0e, 0x4a, 0x44, 0xbf, 0x22, 0x07, 0x6c, 0x55, 0xa4, 0xae, 0x72, 0x5e, -- 0x12, 0xc4, 0x85, 0xd4, 0x9d, 0xa3, 0xb2, 0xb0, 0xd6, 0x38, 0x58, 0x43, 0xe8, 0x67, 0x64, 0x57, -- 0x62, 0x98, 0x23, 0xe2, 0xe1, 0xd0, 0x9d, 0x07, 0x65, 0x61, 0xa9, 0x04, 0xa8, 0x21, 0x37, 0xe2, -- 0x4b, 0x07, 0xcc, 0x63, 0xc1, 0x4f, 0xd5, 0x33, 0x81, 0x46, 0x85, 0x00, 0x35, 0xe4, 0x05, 0x1f, -- 0x01, 0xcc, 0x7c, 0x71, 0x65, 0xb0, 0xe0, 0x57, 0x20, 0xd4, 0x4d, 0xfe, 0x8e, 0x24, 0x62, 0xae, -- 0xe2, 0x7e, 0xe8, 0xe2, 0x1d, 0x59, 0x61, 0x50, 0xb5, 0xf8, 0x06, 0xfa, 0xcd, 0x4c, 0x1a, 0xd5, -- 0xb5, 0xa8, 0x89, 0x83, 0x12, 0x39, 0x93, 0xeb, 0x1b, 0xb3, 0xf7, 0xe6, 0xc6, 0xec, 0xdd, 0xdd, -- 0x98, 0xda, 0x2f, 0x4b, 0x53, 0xfb, 0x63, 0x69, 0x6a, 0xaf, 0x97, 0xa6, 0x76, 0xbd, 0x34, 0xb5, -- 0xbf, 0x96, 0xa6, 0xf6, 0xf7, 0xd2, 0xec, 0xdd, 0x2d, 0x4d, 0xed, 0xf7, 0x5b, 0xb3, 0x77, 0x7d, -- 0x6b, 0xf6, 0xde, 0xdc, 0x9a, 0xbd, 0x1f, 0x3f, 0x9c, 0x06, 0xd9, 0x2c, 0x9f, 0x9c, 0x7a, 0xf1, -- 0x7c, 0x3c, 0x4d, 0xdc, 0x17, 0x6e, 0xe4, 0x8e, 0xc3, 0xf8, 0x32, 0x18, 0x77, 0xfd, 0xcd, 0x9c, -- 0x0c, 0xf1, 0x4f, 0xe4, 0x27, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x32, 0x2b, 0x75, 0xc7, 0x85, -- 0x0a, 0x00, 0x00, -+ // 967 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcf, 0x6f, 0xe3, 0x44, -+ 0x14, 0x8e, 0x9b, 0x3a, 0x69, 0x87, 0xfe, 0xda, 0xd9, 0x2e, 0x6b, 0x40, 0xb2, 0xab, 0x9c, 0x2a, -+ 0x81, 0x1a, 0xf1, 0x43, 0x42, 0x20, 0x56, 0x42, 0xee, 0xb2, 0x52, 0xa5, 0x45, 0x2c, 0xaf, 0x70, -+ 0xe1, 0xe6, 0xd8, 0xb3, 0x89, 0x55, 0xc7, 0x4e, 0x3d, 0x36, 0xec, 0xde, 0xb8, 0x71, 0x84, 0x3f, -+ 0x82, 0x03, 0x17, 0xfe, 0x8f, 0x3d, 0xf6, 0xb8, 0x27, 0x8b, 0xa6, 0x17, 0xe4, 0xd3, 0x4a, 0xdc, -+ 0x11, 0x9a, 0x37, 0x13, 0xdb, 0xe3, 0x38, 0x12, 0x97, 0xcc, 0xbc, 0xef, 0x7b, 0xdf, 0xcc, 0xf3, -+ 0xcc, 0x7b, 0x6f, 0x42, 0x4e, 0x16, 0x57, 0xd3, 0x71, 0x94, 0x4c, 0xaf, 0xa3, 0x79, 0x12, 0xb0, -+ 0x68, 0xcc, 0x33, 0x2f, 0xe3, 0xf2, 0xf7, 0x6c, 0x91, 
0x26, 0x59, 0x42, 0x4d, 0x34, 0xde, 0x3d, -+ 0x9e, 0x26, 0xd3, 0x04, 0x91, 0xb1, 0x98, 0x49, 0x72, 0xf4, 0x8f, 0x41, 0x06, 0xc0, 0x78, 0x1e, -+ 0x65, 0xf4, 0x33, 0x32, 0xe4, 0xf9, 0x7c, 0xee, 0xa5, 0x2f, 0x2d, 0xe3, 0xc4, 0x38, 0x7d, 0xeb, -+ 0xa3, 0x83, 0x33, 0xb9, 0xcc, 0xa5, 0x44, 0xdd, 0xc3, 0x57, 0x85, 0xd3, 0x2b, 0x0b, 0x67, 0xe5, -+ 0x06, 0xab, 0x89, 0x90, 0x5e, 0xe7, 0x2c, 0x0d, 0x59, 0x6a, 0x6d, 0x69, 0xd2, 0x6f, 0x25, 0x5a, -+ 0x4b, 0x95, 0x1b, 0xac, 0x26, 0xf4, 0x11, 0xd9, 0x09, 0xe3, 0x29, 0xe3, 0x19, 0x4b, 0xad, 0x3e, -+ 0x6a, 0x0f, 0x95, 0xf6, 0x42, 0xc1, 0xee, 0x91, 0x12, 0x57, 0x8e, 0x50, 0xcd, 0xe8, 0x27, 0x64, -+ 0xe0, 0x7b, 0xfe, 0x8c, 0x71, 0x6b, 0x1b, 0xc5, 0xfb, 0x4a, 0x7c, 0x8e, 0xa0, 0xbb, 0xaf, 0xa4, -+ 0x26, 0x3a, 0x81, 0xf2, 0x1d, 0xfd, 0x6e, 0x90, 0x81, 0xf4, 0xa0, 0x1f, 0x12, 0xd3, 0x9f, 0xe5, -+ 0xf1, 0x95, 0xfa, 0xe6, 0xbd, 0xa6, 0xbe, 0x21, 0x17, 0x2e, 0x20, 0x07, 0x21, 0x09, 0xe3, 0x80, -+ 0xbd, 0x50, 0xdf, 0xba, 0x41, 0x82, 0x2e, 0x20, 0x07, 0x11, 0x66, 0x8a, 0xa7, 0xac, 0xbe, 0x51, -+ 0xd7, 0x1c, 0x28, 0x8d, 0xf2, 0x01, 0x35, 0x8e, 0xca, 0x6d, 0x32, 0x54, 0x87, 0x4f, 0xbf, 0x27, -+ 0x0f, 0x27, 0x2f, 0x33, 0xc6, 0x9f, 0xa5, 0x89, 0xcf, 0x38, 0x67, 0xc1, 0x33, 0x96, 0x5e, 0x32, -+ 0x3f, 0x89, 0x03, 0x8c, 0xbc, 0xef, 0xbe, 0x57, 0x16, 0xce, 0x26, 0x17, 0xd8, 0x44, 0x88, 0x65, -+ 0xa3, 0x30, 0xee, 0x5c, 0x76, 0xab, 0x5e, 0x76, 0x83, 0x0b, 0x6c, 0x22, 0xe8, 0x05, 0xb9, 0x9f, -+ 0x25, 0x99, 0x17, 0xb9, 0xda, 0xb6, 0xf8, 0xf1, 0x7d, 0xf7, 0x61, 0x59, 0x38, 0x5d, 0x34, 0x74, -+ 0x81, 0xd5, 0x52, 0x4f, 0xb5, 0xad, 0xf0, 0xba, 0x9b, 0x4b, 0xe9, 0x34, 0x74, 0x81, 0xf4, 0x94, -+ 0xec, 0xb0, 0x17, 0xcc, 0xff, 0x2e, 0x9c, 0x33, 0xcb, 0x3c, 0x31, 0x4e, 0x0d, 0x77, 0x4f, 0xa4, -+ 0xd5, 0x0a, 0x83, 0x6a, 0x46, 0xdf, 0x27, 0xbb, 0xd7, 0x39, 0xcb, 0x19, 0xba, 0x0e, 0xd0, 0x75, -+ 0xbf, 0x2c, 0x9c, 0x1a, 0x84, 0x7a, 0x4a, 0xcf, 0x08, 0xe1, 0xf9, 0x44, 0x26, 0x34, 0xb7, 0x86, -+ 0x18, 0xd8, 0x41, 0x59, 0x38, 0x0d, 0x14, 0x1a, 0x73, 0xfa, 0x94, 0x1c, 0x63, 0x74, 0x5f, 0xc5, -+ 0x19, 0x72, 0x2c, 0xcb, 0xd3, 0x98, 0x05, 0xd6, 0x0e, 0x2a, 0xad, 0xb2, 0x70, 0x3a, 0x79, 0xe8, -+ 0x44, 0xe9, 0x88, 0x0c, 0xf8, 0x22, 0x0a, 0x33, 0x6e, 0xed, 0xa2, 0x9e, 0x88, 0x44, 0x92, 0x08, -+ 0xa8, 0x11, 0x7d, 0x66, 0x5e, 0x1a, 0x70, 0x8b, 0x34, 0x7c, 0x10, 0x01, 0x35, 0x8e, 0xbe, 0x20, -+ 0x43, 0x55, 0xad, 0x22, 0xc1, 0x79, 0x96, 0xa4, 0xac, 0x55, 0x13, 0x97, 0x02, 0xab, 0x13, 0x1c, -+ 0x5d, 0x40, 0x0e, 0xa3, 0x3f, 0xb7, 0xc8, 0xce, 0x45, 0x5d, 0x94, 0x7b, 0x18, 0x2a, 0x30, 0x91, -+ 0xde, 0x32, 0x41, 0x4d, 0xf7, 0xa8, 0x2c, 0x1c, 0x0d, 0x07, 0xcd, 0xa2, 0x4f, 0x08, 0x45, 0xfb, -+ 0x5c, 0x14, 0x19, 0xff, 0xda, 0xcb, 0x50, 0x2b, 0xb3, 0xf0, 0xed, 0xb2, 0x70, 0x3a, 0x58, 0xe8, -+ 0xc0, 0xaa, 0xdd, 0x5d, 0xb4, 0xb9, 0x4a, 0xba, 0x7a, 0x77, 0x85, 0x83, 0x66, 0xd1, 0xcf, 0xc9, -+ 0x41, 0x9d, 0x32, 0x97, 0x2c, 0xce, 0x54, 0x86, 0xd1, 0xb2, 0x70, 0x5a, 0x0c, 0xb4, 0xec, 0xfa, -+ 0xbc, 0xcc, 0xff, 0x7d, 0x5e, 0xbf, 0x6e, 0x11, 0x13, 0xf9, 0x6a, 0x63, 0xf9, 0x11, 0xc0, 0x9e, -+ 0xab, 0x7a, 0xae, 0x37, 0xae, 0x18, 0x68, 0xd9, 0xf4, 0x1b, 0xf2, 0xa0, 0x81, 0x3c, 0x4e, 0x7e, -+ 0x8a, 0xa3, 0xc4, 0x0b, 0xaa, 0x53, 0x7b, 0xa7, 0x2c, 0x9c, 0x6e, 0x07, 0xe8, 0x86, 0xc5, 0x1d, -+ 0xf8, 0x1a, 0x86, 0x05, 0xd0, 0xaf, 0xef, 0x60, 0x9d, 0x85, 0x0e, 0xac, 0xee, 0xaa, 0xdb, 0x7a, -+ 0xbb, 0x13, 0x58, 0x77, 0x57, 0x1d, 0xfd, 0xd2, 0x27, 0x26, 0xf2, 0xe2, 0x44, 0x66, 0xcc, 0x0b, -+ 0xa4, 0xb3, 0x68, 0x06, 0xcd, 0xab, 0xd0, 0x19, 0x68, 0xd9, 0x9a, 0x16, 0x2f, 0x08, 0xef, 0xa4, -+ 0xad, 0x45, 0x06, 0x5a, 0x36, 0x3d, 0x27, 
0xf7, 0x02, 0xe6, 0x27, 0xf3, 0x45, 0x8a, 0xed, 0x42, -+ 0x6e, 0x3d, 0x40, 0xf9, 0x83, 0xb2, 0x70, 0xd6, 0x49, 0x58, 0x87, 0xda, 0x8b, 0xc8, 0x18, 0x86, -+ 0xdd, 0x8b, 0xc8, 0x30, 0xd6, 0x21, 0xfa, 0x88, 0x1c, 0xb6, 0xe3, 0x90, 0xcd, 0xe1, 0x7e, 0x59, -+ 0x38, 0x6d, 0x0a, 0xda, 0x80, 0x90, 0xe3, 0xf5, 0x3e, 0xce, 0x17, 0x51, 0xe8, 0x7b, 0x42, 0xbe, -+ 0x5b, 0xcb, 0x5b, 0x14, 0xb4, 0x81, 0xd1, 0xbf, 0x5b, 0xc4, 0xc4, 0x87, 0x49, 0x94, 0x12, 0x93, -+ 0xed, 0xe6, 0x49, 0x92, 0xc7, 0x5a, 0x21, 0x37, 0x71, 0xd0, 0x2c, 0xfa, 0x25, 0x39, 0x62, 0xab, -+ 0x26, 0x75, 0x9d, 0x8b, 0x96, 0x20, 0x13, 0xd2, 0x74, 0x8f, 0xcb, 0xc2, 0x59, 0xe3, 0x60, 0x0d, -+ 0xa1, 0x9f, 0x92, 0x7d, 0x85, 0x61, 0x8d, 0xc8, 0x87, 0xc3, 0x74, 0xef, 0x95, 0x85, 0xa3, 0x13, -+ 0xa0, 0x9b, 0x42, 0x88, 0x2f, 0x1d, 0x30, 0x9f, 0x85, 0x3f, 0x56, 0xcf, 0x04, 0x0a, 0x35, 0x02, -+ 0x74, 0x53, 0x34, 0x7c, 0x04, 0xb0, 0xf2, 0x65, 0xca, 0x60, 0xc3, 0xaf, 0x40, 0xa8, 0xa7, 0xe2, -+ 0x1d, 0x49, 0x65, 0xac, 0x32, 0x3f, 0x4c, 0xf9, 0x8e, 0xac, 0x30, 0xa8, 0x66, 0xe2, 0x00, 0x83, -+ 0x66, 0x25, 0x0d, 0xeb, 0x5e, 0xd4, 0xc4, 0x41, 0xb3, 0xdc, 0xc9, 0xcd, 0xad, 0xdd, 0x7b, 0x7d, -+ 0x6b, 0xf7, 0xde, 0xdc, 0xda, 0xc6, 0xcf, 0x4b, 0xdb, 0xf8, 0x63, 0x69, 0x1b, 0xaf, 0x96, 0xb6, -+ 0x71, 0xb3, 0xb4, 0x8d, 0xbf, 0x96, 0xb6, 0xf1, 0xf7, 0xd2, 0xee, 0xbd, 0x59, 0xda, 0xc6, 0x6f, -+ 0x77, 0x76, 0xef, 0xe6, 0xce, 0xee, 0xbd, 0xbe, 0xb3, 0x7b, 0x3f, 0x7c, 0x30, 0x0d, 0xb3, 0x59, -+ 0x3e, 0x39, 0xf3, 0x93, 0xf9, 0x78, 0x9a, 0x7a, 0xcf, 0xbd, 0xd8, 0x1b, 0x47, 0xc9, 0x55, 0x38, -+ 0xee, 0xfa, 0x87, 0x38, 0x19, 0xe0, 0xff, 0xbf, 0x8f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x8a, -+ 0x9c, 0x39, 0x00, 0x40, 0x0a, 0x00, 0x00, - } - - func (this *Result) Equal(that interface{}) bool { -@@ -802,9 +793,6 @@ func (this *Caches) Equal(that interface{}) bool { - if !this.Result.Equal(&that1.Result) { - return false - } -- if !this.StatsResult.Equal(&that1.StatsResult) { -- return false -- } - return true - } - func (this *Summary) Equal(that interface{}) bool { -@@ -1049,12 +1037,11 @@ func (this *Caches) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 8) -+ s := make([]string, 0, 7) - s = append(s, ""&stats.Caches{"") - s = append(s, ""Chunk: ""+strings.Replace(this.Chunk.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""Index: ""+strings.Replace(this.Index.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""Result: ""+strings.Replace(this.Result.GoString(), `&`, ``, 1)+"",\n"") -- s = append(s, ""StatsResult: ""+strings.Replace(this.StatsResult.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } -@@ -1236,16 +1223,6 @@ func (m *Caches) MarshalToSizedBuffer(dAtA []byte) (int, error) { - _ = i - var l int - _ = l -- { -- size, err := m.StatsResult.MarshalToSizedBuffer(dAtA[:i]) -- if err != nil { -- return 0, err -- } -- i -= size -- i = encodeVarintStats(dAtA, i, uint64(size)) -- } -- i-- -- dAtA[i] = 0x22 - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { -@@ -1639,8 +1616,6 @@ func (m *Caches) Size() (n int) { - n += 1 + l + sovStats(uint64(l)) - l = m.Result.Size() - n += 1 + l + sovStats(uint64(l)) -- l = m.StatsResult.Size() -- n += 1 + l + sovStats(uint64(l)) - return n - } - -@@ -1821,7 +1796,6 @@ func (this *Caches) String() string { - `Chunk:` + strings.Replace(strings.Replace(this.Chunk.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, - `Index:` + strings.Replace(strings.Replace(this.Index.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, - 
`Result:` + strings.Replace(strings.Replace(this.Result.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, -- `StatsResult:` + strings.Replace(strings.Replace(this.StatsResult.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, - `}`, - }, """") - return s -@@ -2234,39 +2208,6 @@ func (m *Caches) Unmarshal(dAtA []byte) error { - return err - } - iNdEx = postIndex -- case 4: -- if wireType != 2 { -- return fmt.Errorf(""proto: wrong wireType = %d for field StatsResult"", wireType) -- } -- var msglen int -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowStats -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- msglen |= int(b&0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- if msglen < 0 { -- return ErrInvalidLengthStats -- } -- postIndex := iNdEx + msglen -- if postIndex < 0 { -- return ErrInvalidLengthStats -- } -- if postIndex > l { -- return io.ErrUnexpectedEOF -- } -- if err := m.StatsResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -- return err -- } -- iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) -diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto -index afbbb8859e251..18d295ad36edd 100644 ---- a/pkg/logqlmodel/stats/stats.proto -+++ b/pkg/logqlmodel/stats/stats.proto -@@ -41,10 +41,6 @@ message Caches { - (gogoproto.nullable) = false, - (gogoproto.jsontag) = ""result"" - ]; -- Cache statsResult = 4 [ -- (gogoproto.nullable) = false, -- (gogoproto.jsontag) = ""statsResult"" -- ]; - } - - // Summary is the summary of a query statistics. -diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go -index 1b1ae339a0593..b2679f8fdc999 100644 ---- a/pkg/querier/queryrange/codec_test.go -+++ b/pkg/querier/queryrange/codec_test.go -@@ -1080,15 +1080,6 @@ var ( - ""requests"": 0, - ""downloadTime"": 0 - }, -- ""statsResult"": { -- ""entriesFound"": 0, -- ""entriesRequested"": 0, -- ""entriesStored"": 0, -- ""bytesReceived"": 0, -- ""bytesSent"": 0, -- ""requests"": 0, -- ""downloadTime"": 0 -- }, - ""result"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -diff --git a/pkg/querier/queryrange/prometheus_test.go b/pkg/querier/queryrange/prometheus_test.go -index 9e9fbf1d1a817..07203c9d5abc0 100644 ---- a/pkg/querier/queryrange/prometheus_test.go -+++ b/pkg/querier/queryrange/prometheus_test.go -@@ -66,15 +66,6 @@ var emptyStats = `""stats"": { - ""requests"": 0, - ""downloadTime"": 0 - }, -- ""statsResult"": { -- ""entriesFound"": 0, -- ""entriesRequested"": 0, -- ""entriesStored"": 0, -- ""bytesReceived"": 0, -- ""bytesSent"": 0, -- ""requests"": 0, -- ""downloadTime"": 0 -- }, - ""result"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -diff --git a/pkg/util/marshal/legacy/marshal_test.go b/pkg/util/marshal/legacy/marshal_test.go -index 3f0529b1fcd0a..cadba2c577745 100644 ---- a/pkg/util/marshal/legacy/marshal_test.go -+++ b/pkg/util/marshal/legacy/marshal_test.go -@@ -97,15 +97,6 @@ var queryTests = []struct { - ""requests"": 0, - ""downloadTime"": 0 - }, -- ""statsResult"": { -- ""entriesFound"": 0, -- ""entriesRequested"": 0, -- ""entriesStored"": 0, -- ""bytesReceived"": 0, -- ""bytesSent"": 0, -- ""requests"": 0, -- ""downloadTime"": 0 -- }, - ""result"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go -index 6bc130a6786d8..76c32e238341e 100644 ---- a/pkg/util/marshal/marshal_test.go -+++ 
b/pkg/util/marshal/marshal_test.go -@@ -106,15 +106,6 @@ var queryTests = []struct { - ""requests"": 0, - ""downloadTime"": 0 - }, -- ""statsResult"": { -- ""entriesFound"": 0, -- ""entriesRequested"": 0, -- ""entriesStored"": 0, -- ""bytesReceived"": 0, -- ""bytesSent"": 0, -- ""requests"": 0, -- ""downloadTime"": 0 -- }, - ""result"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -@@ -252,15 +243,6 @@ var queryTests = []struct { - ""requests"": 0, - ""downloadTime"": 0 - }, -- ""statsResult"": { -- ""entriesFound"": 0, -- ""entriesRequested"": 0, -- ""entriesStored"": 0, -- ""bytesReceived"": 0, -- ""bytesSent"": 0, -- ""requests"": 0, -- ""downloadTime"": 0 -- }, - ""result"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -@@ -419,15 +401,6 @@ var queryTests = []struct { - ""requests"": 0, - ""downloadTime"": 0 - }, -- ""statsResult"": { -- ""entriesFound"": 0, -- ""entriesRequested"": 0, -- ""entriesStored"": 0, -- ""bytesReceived"": 0, -- ""bytesSent"": 0, -- ""requests"": 0, -- ""downloadTime"": 0 -- }, - ""result"": { - ""entriesFound"": 0, - ""entriesRequested"": 0,",unknown,"Revert ""Add summary stats and metrics for stats cache (#9536)"" (#9721) - -This reverts commit af287ac3eba46e04ad1d5cf8051262ea0f2a25de. - -There is a bug in this PR that inflates the stats returned for the query -since we reuse the stats ctx in the query execution engine." -f84f0c7f733a2226148e66e7f603dbd4fc0a7bda,2025-03-01 02:55:46,renovate[bot],"fix(deps): update dependency lucide-react to ^0.477.0 (main) (#16509) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json -index 785c1613d2709..f3ebe9e1cb0ce 100644 ---- a/pkg/ui/frontend/package-lock.json -+++ b/pkg/ui/frontend/package-lock.json -@@ -36,7 +36,7 @@ - ""cmdk"": ""^1.0.0"", - ""date-fns"": ""^4.0.0"", - ""lodash"": ""^4.17.21"", -- ""lucide-react"": ""^0.476.0"", -+ ""lucide-react"": ""^0.477.0"", - ""next-themes"": ""^0.4.4"", - ""prism-react-renderer"": ""^2.4.1"", - ""react"": ""^18.2.0"", -@@ -4976,9 +4976,9 @@ - } - }, - ""node_modules/lucide-react"": { -- ""version"": ""0.476.0"", -- ""resolved"": ""https://registry.npmjs.org/lucide-react/-/lucide-react-0.476.0.tgz"", -- ""integrity"": ""sha512-x6cLTk8gahdUPje0hSgLN1/MgiJH+Xl90Xoxy9bkPAsMPOUiyRSKR4JCDPGVCEpyqnZXH3exFWNItcvra9WzUQ=="", -+ ""version"": ""0.477.0"", -+ ""resolved"": ""https://registry.npmjs.org/lucide-react/-/lucide-react-0.477.0.tgz"", -+ ""integrity"": ""sha512-yCf7aYxerFZAbd8jHJxjwe1j7jEMPptjnaOqdYeirFnEy85cNR3/L+o0I875CYFYya+eEVzZSbNuRk8BZPDpVw=="", - ""license"": ""ISC"", - ""peerDependencies"": { - ""react"": ""^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"" -diff --git a/pkg/ui/frontend/package.json b/pkg/ui/frontend/package.json -index 572d60fc9bdfa..07ab5b476813b 100644 ---- a/pkg/ui/frontend/package.json -+++ b/pkg/ui/frontend/package.json -@@ -38,7 +38,7 @@ - ""cmdk"": ""^1.0.0"", - ""date-fns"": ""^4.0.0"", - ""lodash"": ""^4.17.21"", -- ""lucide-react"": ""^0.476.0"", -+ ""lucide-react"": ""^0.477.0"", - ""next-themes"": ""^0.4.4"", - ""prism-react-renderer"": ""^2.4.1"", - ""react"": ""^18.2.0"",",fix,"update dependency lucide-react to ^0.477.0 (main) (#16509) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -0e28452f1f1a9173167640d66abc992c31f23f7b,2022-07-15 11:47:58,Pablo,Targets: Add Heroku HTTPS drain target (#6448),False,"diff --git a/clients/pkg/promtail/client/fake/client.go 
b/clients/pkg/promtail/client/fake/client.go -index 9c01470aa2d00..33e886c30980c 100644 ---- a/clients/pkg/promtail/client/fake/client.go -+++ b/clients/pkg/promtail/client/fake/client.go -@@ -60,3 +60,11 @@ func (c *Client) StopNow() { - func (c *Client) Name() string { - return ""fake"" - } -+ -+// Clear is used to cleanup the buffered received entries, so the same client can be re-used between -+// test cases. -+func (c *Client) Clear() { -+ c.mtx.Lock() -+ defer c.mtx.Unlock() -+ c.received = []api.Entry{} -+} -diff --git a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go -index c331ed03ac19a..5491a9f56f2c4 100644 ---- a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go -+++ b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go -@@ -33,17 +33,18 @@ import ( - - // Config describes a job to scrape. - type Config struct { -- JobName string `yaml:""job_name,omitempty""` -- PipelineStages stages.PipelineStages `yaml:""pipeline_stages,omitempty""` -- JournalConfig *JournalTargetConfig `yaml:""journal,omitempty""` -- SyslogConfig *SyslogTargetConfig `yaml:""syslog,omitempty""` -- GcplogConfig *GcplogTargetConfig `yaml:""gcplog,omitempty""` -- PushConfig *PushTargetConfig `yaml:""loki_push_api,omitempty""` -- WindowsConfig *WindowsEventsTargetConfig `yaml:""windows_events,omitempty""` -- KafkaConfig *KafkaTargetConfig `yaml:""kafka,omitempty""` -- GelfConfig *GelfTargetConfig `yaml:""gelf,omitempty""` -- CloudflareConfig *CloudflareConfig `yaml:""cloudflare,omitempty""` -- RelabelConfigs []*relabel.Config `yaml:""relabel_configs,omitempty""` -+ JobName string `yaml:""job_name,omitempty""` -+ PipelineStages stages.PipelineStages `yaml:""pipeline_stages,omitempty""` -+ JournalConfig *JournalTargetConfig `yaml:""journal,omitempty""` -+ SyslogConfig *SyslogTargetConfig `yaml:""syslog,omitempty""` -+ GcplogConfig *GcplogTargetConfig `yaml:""gcplog,omitempty""` -+ PushConfig *PushTargetConfig `yaml:""loki_push_api,omitempty""` -+ WindowsConfig *WindowsEventsTargetConfig `yaml:""windows_events,omitempty""` -+ KafkaConfig *KafkaTargetConfig `yaml:""kafka,omitempty""` -+ GelfConfig *GelfTargetConfig `yaml:""gelf,omitempty""` -+ CloudflareConfig *CloudflareConfig `yaml:""cloudflare,omitempty""` -+ HerokuDrainConfig *HerokuDrainTargetConfig `yaml:""heroku_drain,omitempty""` -+ RelabelConfigs []*relabel.Config `yaml:""relabel_configs,omitempty""` - // List of Docker service discovery configurations. - DockerSDConfigs []*moby.DockerSDConfig `yaml:""docker_sd_configs,omitempty""` - ServiceDiscoveryConfig ServiceDiscoveryConfig `yaml:"",inline""` -@@ -359,6 +360,19 @@ type GcplogTargetConfig struct { - UseIncomingTimestamp bool `yaml:""use_incoming_timestamp""` - } - -+// HerokuDrainTargetConfig describes a scrape config to listen and consume heroku logs, in the HTTPS drain manner. -+type HerokuDrainTargetConfig struct { -+ // Server is the weaveworks server config for listening connections -+ Server server.Config `yaml:""server""` -+ -+ // Labels optionally holds labels to associate with each record received on the push api. -+ Labels model.LabelSet `yaml:""labels""` -+ -+ // UseIncomingTimestamp sets the timestamp to the incoming heroku log entry timestamp. If false, -+ // promtail will assign the current timestamp to the log entry when it was processed. -+ UseIncomingTimestamp bool `yaml:""use_incoming_timestamp""` -+} -+ - // PushTargetConfig describes a scrape config that listens for Loki push messages. 
- type PushTargetConfig struct { - // Server is the weaveworks server config for listening connections -diff --git a/clients/pkg/promtail/targets/heroku/metrics.go b/clients/pkg/promtail/targets/heroku/metrics.go -new file mode 100644 -index 0000000000000..e3e61a6910715 ---- /dev/null -+++ b/clients/pkg/promtail/targets/heroku/metrics.go -@@ -0,0 +1,27 @@ -+package heroku -+ -+import ""github.com/prometheus/client_golang/prometheus"" -+ -+type Metrics struct { -+ herokuEntries *prometheus.CounterVec -+ herokuErrors *prometheus.CounterVec -+} -+ -+func NewMetrics(reg prometheus.Registerer) *Metrics { -+ var m Metrics -+ -+ m.herokuEntries = prometheus.NewCounterVec(prometheus.CounterOpts{ -+ Namespace: ""promtail"", -+ Name: ""heroku_drain_target_entries_total"", -+ Help: ""Number of successful entries received by the Heroku target"", -+ }, []string{}) -+ -+ m.herokuErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ -+ Namespace: ""promtail"", -+ Name: ""heroku_drain_target_parsing_errors_total"", -+ Help: ""Number of parsing errors while receiving Heroku messages"", -+ }, []string{}) -+ -+ reg.MustRegister(m.herokuEntries, m.herokuErrors) -+ return &m -+} -diff --git a/clients/pkg/promtail/targets/heroku/target.go b/clients/pkg/promtail/targets/heroku/target.go -new file mode 100644 -index 0000000000000..d794e6c488288 ---- /dev/null -+++ b/clients/pkg/promtail/targets/heroku/target.go -@@ -0,0 +1,188 @@ -+package heroku -+ -+import ( -+ ""flag"" -+ ""fmt"" -+ ""net/http"" -+ ""strings"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/go-kit/log/level"" -+ herokuEncoding ""github.com/heroku/x/logplex/encoding"" -+ ""github.com/imdario/mergo"" -+ ""github.com/pkg/errors"" -+ ""github.com/prometheus/common/model"" -+ ""github.com/prometheus/prometheus/model/labels"" -+ ""github.com/prometheus/prometheus/model/relabel"" -+ ""github.com/weaveworks/common/logging"" -+ ""github.com/weaveworks/common/server"" -+ -+ ""github.com/grafana/loki/clients/pkg/promtail/api"" -+ ""github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"" -+ ""github.com/grafana/loki/clients/pkg/promtail/targets/target"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" -+ util_log ""github.com/grafana/loki/pkg/util/log"" -+) -+ -+type Target struct { -+ logger log.Logger -+ handler api.EntryHandler -+ config *scrapeconfig.HerokuDrainTargetConfig -+ jobName string -+ server *server.Server -+ metrics *Metrics -+ relabelConfigs []*relabel.Config -+} -+ -+// NewTarget creates a brand new Heroku Drain target, capable of receiving logs from a Heroku application through an HTTP drain. -+func NewTarget(metrics *Metrics, logger log.Logger, handler api.EntryHandler, jobName string, config *scrapeconfig.HerokuDrainTargetConfig, relabel []*relabel.Config) (*Target, error) { -+ wrappedLogger := log.With(logger, ""component"", ""heroku_drain"") -+ -+ ht := &Target{ -+ metrics: metrics, -+ logger: wrappedLogger, -+ handler: handler, -+ jobName: jobName, -+ config: config, -+ relabelConfigs: relabel, -+ } -+ -+ // Bit of a chicken and egg problem trying to register the defaults and apply overrides from the loaded config. -+ // First create an empty config and set defaults. -+ defaults := server.Config{} -+ defaults.RegisterFlags(flag.NewFlagSet(""empty"", flag.ContinueOnError)) -+ // Then apply any config values loaded as overrides to the defaults. 
-+ if err := mergo.Merge(&defaults, config.Server, mergo.WithOverride); err != nil { -+ return nil, errors.Wrap(err, ""failed to parse configs and override defaults when configuring heroku drain target"") -+ } -+ // The merge won't overwrite with a zero value but in the case of ports 0 value -+ // indicates the desire for a random port so reset these to zero if the incoming config val is 0 -+ if config.Server.HTTPListenPort == 0 { -+ defaults.HTTPListenPort = 0 -+ } -+ if config.Server.GRPCListenPort == 0 { -+ defaults.GRPCListenPort = 0 -+ } -+ // Set the config to the new combined config. -+ config.Server = defaults -+ -+ err := ht.run() -+ if err != nil { -+ return nil, err -+ } -+ -+ return ht, nil -+} -+ -+func (h *Target) run() error { -+ level.Info(h.logger).Log(""msg"", ""starting heroku drain target"", ""job"", h.jobName) -+ -+ // To prevent metric collisions because all metrics are going to be registered in the global Prometheus registry. -+ -+ tentativeServerMetricNamespace := ""promtail_heroku_drain_target_"" + h.jobName -+ if !model.IsValidMetricName(model.LabelValue(tentativeServerMetricNamespace)) { -+ return fmt.Errorf(""invalid prometheus-compatible job name: %s"", h.jobName) -+ } -+ h.config.Server.MetricsNamespace = tentativeServerMetricNamespace -+ -+ // We don't want the /debug and /metrics endpoints running, since this is not the main promtail HTTP server. -+ // We want this target to expose the least surface area possible, hence disabling WeaveWorks HTTP server metrics -+ // and debugging functionality. -+ h.config.Server.RegisterInstrumentation = false -+ -+ // Wrapping util logger with component-specific key vals, and the expected GoKit logging interface -+ h.config.Server.Log = logging.GoKit(log.With(util_log.Logger, ""component"", ""heroku_drain"")) -+ -+ srv, err := server.New(h.config.Server) -+ if err != nil { -+ return err -+ } -+ -+ h.server = srv -+ h.server.HTTP.Path(""/heroku/api/v1/drain"").Methods(""POST"").Handler(http.HandlerFunc(h.drain)) -+ -+ go func() { -+ err := srv.Run() -+ if err != nil { -+ level.Error(h.logger).Log(""msg"", ""heroku drain target shutdown with error"", ""err"", err) -+ } -+ }() -+ -+ return nil -+} -+ -+func (h *Target) drain(w http.ResponseWriter, r *http.Request) { -+ entries := h.handler.Chan() -+ defer r.Body.Close() -+ herokuScanner := herokuEncoding.NewDrainScanner(r.Body) -+ for herokuScanner.Scan() { -+ ts := time.Now() -+ message := herokuScanner.Message() -+ lb := labels.NewBuilder(nil) -+ lb.Set(""__heroku_drain_host"", message.Hostname) -+ lb.Set(""__heroku_drain_app"", message.Application) -+ lb.Set(""__heroku_drain_proc"", message.Process) -+ lb.Set(""__heroku_drain_log_id"", message.ID) -+ -+ if h.config.UseIncomingTimestamp { -+ ts = message.Timestamp -+ } -+ -+ processed := relabel.Process(lb.Labels(), h.relabelConfigs...) 
-+ -+ // Start with the set of labels fixed in the configuration -+ filtered := h.Labels().Clone() -+ for _, lbl := range processed { -+ if strings.HasPrefix(lbl.Name, ""__"") { -+ continue -+ } -+ filtered[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value) -+ } -+ -+ entries <- api.Entry{ -+ Labels: filtered, -+ Entry: logproto.Entry{ -+ Timestamp: ts, -+ Line: message.Message, -+ }, -+ } -+ h.metrics.herokuEntries.WithLabelValues().Inc() -+ } -+ err := herokuScanner.Err() -+ if err != nil { -+ h.metrics.herokuErrors.WithLabelValues().Inc() -+ level.Warn(h.logger).Log(""msg"", ""failed to read incoming heroku request"", ""err"", err.Error()) -+ http.Error(w, err.Error(), http.StatusBadRequest) -+ return -+ } -+ w.WriteHeader(http.StatusNoContent) -+} -+ -+func (h *Target) Type() target.TargetType { -+ return target.HerokuDrainTargetType -+} -+ -+func (h *Target) DiscoveredLabels() model.LabelSet { -+ return nil -+} -+ -+func (h *Target) Labels() model.LabelSet { -+ return h.config.Labels -+} -+ -+func (h *Target) Ready() bool { -+ return true -+} -+ -+func (h *Target) Details() interface{} { -+ return map[string]string{} -+} -+ -+func (h *Target) Stop() error { -+ level.Info(h.logger).Log(""msg"", ""stopping heroku drain target"", ""job"", h.jobName) -+ h.server.Shutdown() -+ h.handler.Stop() -+ return nil -+} -diff --git a/clients/pkg/promtail/targets/heroku/target_test.go b/clients/pkg/promtail/targets/heroku/target_test.go -new file mode 100644 -index 0000000000000..c3c308afeb72e ---- /dev/null -+++ b/clients/pkg/promtail/targets/heroku/target_test.go -@@ -0,0 +1,315 @@ -+package heroku -+ -+import ( -+ ""flag"" -+ ""fmt"" -+ ""net"" -+ ""net/http"" -+ ""os"" -+ ""strings"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/google/uuid"" -+ ""github.com/prometheus/client_golang/prometheus"" -+ ""github.com/prometheus/common/model"" -+ ""github.com/prometheus/prometheus/model/relabel"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/server"" -+ -+ ""github.com/grafana/loki/clients/pkg/promtail/client/fake"" -+ ""github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"" -+) -+ -+const localhost = ""127.0.0.1"" -+ -+const testPayload = `270 <158>1 2022-06-13T14:52:23.622778+00:00 host heroku router - at=info method=GET path=""/"" host=cryptic-cliffs-27764.herokuapp.com request_id=59da6323-2bc4-4143-8677-cc66ccfb115f fwd=""181.167.87.140"" dyno=web.1 connect=0ms service=3ms status=200 bytes=6979 protocol=https -+` -+const testLogLine1 = `140 <190>1 2022-06-13T14:52:23.621815+00:00 host app web.1 - [GIN] 2022/06/13 - 14:52:23 | 200 | 1.428101ms | 181.167.87.140 | GET ""/"" -+` -+const testLogLine1Timestamp = ""2022-06-13T14:52:23.621815+00:00"" -+const testLogLine2 = `156 <190>1 2022-06-13T14:52:23.827271+00:00 host app web.1 - [GIN] 2022/06/13 - 14:52:23 | 200 | 163.92µs | 181.167.87.140 | GET ""/static/main.css"" -+` -+ -+func makeDrainRequest(host string, bodies ...string) (*http.Request, error) { -+ req, err := http.NewRequest(http.MethodPost, fmt.Sprintf(""%s/heroku/api/v1/drain"", host), strings.NewReader(strings.Join(bodies, """"))) -+ if err != nil { -+ return nil, err -+ } -+ -+ drainToken := uuid.New().String() -+ frameID := uuid.New().String() -+ req.Header.Set(""Content-Type"", ""application/heroku_drain-1"") -+ req.Header.Set(""Logplex-Drain-Token"", fmt.Sprintf(""d.%s"", drainToken)) -+ req.Header.Set(""Logplex-Frame-Id"", frameID) -+ req.Header.Set(""Logplex-Msg-Count"", fmt.Sprintf(""%d"", len(bodies))) -+ -+ 
return req, nil -+} -+ -+func TestHerokuDrainTarget(t *testing.T) { -+ w := log.NewSyncWriter(os.Stderr) -+ logger := log.NewLogfmtLogger(w) -+ -+ type expectedEntry struct { -+ labels model.LabelSet -+ line string -+ } -+ type args struct { -+ RequestBodies []string -+ RelabelConfigs []*relabel.Config -+ Labels model.LabelSet -+ } -+ -+ cases := map[string]struct { -+ args args -+ expectedEntries []expectedEntry -+ }{ -+ ""heroku request with a single log line, internal labels dropped, and fixed are propagated"": { -+ args: args{ -+ RequestBodies: []string{testPayload}, -+ Labels: model.LabelSet{ -+ ""job"": ""some_job_name"", -+ }, -+ }, -+ expectedEntries: []expectedEntry{ -+ { -+ labels: model.LabelSet{ -+ ""job"": ""some_job_name"", -+ }, -+ line: `at=info method=GET path=""/"" host=cryptic-cliffs-27764.herokuapp.com request_id=59da6323-2bc4-4143-8677-cc66ccfb115f fwd=""181.167.87.140"" dyno=web.1 connect=0ms service=3ms status=200 bytes=6979 protocol=https -+`, -+ }, -+ }, -+ }, -+ ""heroku request with a two log lines, internal labels dropped, and fixed are propagated"": { -+ args: args{ -+ RequestBodies: []string{testLogLine1, testLogLine2}, -+ Labels: model.LabelSet{ -+ ""job"": ""multiple_line_job"", -+ }, -+ }, -+ expectedEntries: []expectedEntry{ -+ { -+ labels: model.LabelSet{ -+ ""job"": ""multiple_line_job"", -+ }, -+ line: `[GIN] 2022/06/13 - 14:52:23 | 200 | 1.428101ms | 181.167.87.140 | GET ""/"" -+`, -+ }, -+ { -+ labels: model.LabelSet{ -+ ""job"": ""multiple_line_job"", -+ }, -+ line: `[GIN] 2022/06/13 - 14:52:23 | 200 | 163.92µs | 181.167.87.140 | GET ""/static/main.css"" -+`, -+ }, -+ }, -+ }, -+ ""heroku request with a single log line, with internal labels relabeled, and fixed labels"": { -+ args: args{ -+ RequestBodies: []string{testLogLine1}, -+ Labels: model.LabelSet{ -+ ""job"": ""relabeling_job"", -+ }, -+ RelabelConfigs: []*relabel.Config{ -+ { -+ SourceLabels: model.LabelNames{""__heroku_drain_host""}, -+ TargetLabel: ""host"", -+ Replacement: ""$1"", -+ Action: relabel.Replace, -+ Regex: relabel.MustNewRegexp(""(.*)""), -+ }, -+ { -+ SourceLabels: model.LabelNames{""__heroku_drain_app""}, -+ TargetLabel: ""app"", -+ Replacement: ""$1"", -+ Action: relabel.Replace, -+ Regex: relabel.MustNewRegexp(""(.*)""), -+ }, -+ { -+ SourceLabels: model.LabelNames{""__heroku_drain_proc""}, -+ TargetLabel: ""procID"", -+ Replacement: ""$1"", -+ Action: relabel.Replace, -+ Regex: relabel.MustNewRegexp(""(.*)""), -+ }, -+ }, -+ }, -+ expectedEntries: []expectedEntry{ -+ { -+ line: `[GIN] 2022/06/13 - 14:52:23 | 200 | 1.428101ms | 181.167.87.140 | GET ""/"" -+`, -+ labels: model.LabelSet{ -+ ""host"": ""host"", -+ ""app"": ""app"", -+ ""procID"": ""web.1"", -+ }, -+ }, -+ }, -+ }, -+ } -+ for name, tc := range cases { -+ t.Run(name, func(t *testing.T) { -+ // Create fake promtail client -+ eh := fake.New(func() {}) -+ defer eh.Stop() -+ -+ serverConfig, port, err := getServerConfigWithAvailablePort() -+ require.NoError(t, err, ""error generating server config or finding open port"") -+ config := &scrapeconfig.HerokuDrainTargetConfig{ -+ Server: serverConfig, -+ Labels: tc.args.Labels, -+ UseIncomingTimestamp: false, -+ } -+ -+ prometheus.DefaultRegisterer = prometheus.NewRegistry() -+ metrics := NewMetrics(prometheus.DefaultRegisterer) -+ pt, err := NewTarget(metrics, logger, eh, ""test_job"", config, tc.args.RelabelConfigs) -+ require.NoError(t, err) -+ defer func() { -+ _ = pt.Stop() -+ }() -+ -+ // Clear received lines after test case is ran -+ defer eh.Clear() -+ -+ // 
Send some logs -+ ts := time.Now() -+ -+ req, err := makeDrainRequest(fmt.Sprintf(""http://%s:%d"", localhost, port), tc.args.RequestBodies...) -+ require.NoError(t, err, ""expected test drain request to be successfully created"") -+ res, err := http.DefaultClient.Do(req) -+ require.NoError(t, err) -+ require.Equal(t, http.StatusNoContent, res.StatusCode, ""expected no-content status code"") -+ -+ // Wait for them to appear in the test handler -+ countdown := 1000 -+ for len(eh.Received()) != 1 && countdown > 0 { -+ time.Sleep(1 * time.Millisecond) -+ countdown-- -+ } -+ -+ // Make sure we didn't timeout -+ require.Equal(t, len(tc.args.RequestBodies), len(eh.Received())) -+ -+ require.Equal(t, len(eh.Received()), len(tc.expectedEntries), ""expected to receive equal amount of expected label sets"") -+ for i, expectedEntry := range tc.expectedEntries { -+ // TODO: Add assertion over propagated timestamp -+ actualEntry := eh.Received()[i] -+ -+ require.Equal(t, expectedEntry.line, actualEntry.Line, ""expected line to be equal for %d-th entry"", i) -+ -+ expectedLS := expectedEntry.labels -+ actualLS := actualEntry.Labels -+ for label, value := range expectedLS { -+ require.Equal(t, expectedLS[label], actualLS[label], ""expected label %s to be equal to %s in %d-th entry"", label, value, i) -+ } -+ -+ // Timestamp is always set in the handler, we expect received timestamps to be slightly higher than the timestamp when we started sending logs. -+ require.GreaterOrEqual(t, actualEntry.Timestamp.Unix(), ts.Unix(), ""expected %d-th entry to have a received timestamp greater than publish time"", i) -+ } -+ }) -+ } -+} -+ -+func TestHerokuDrainTarget_UseIncomingTimestamp(t *testing.T) { -+ w := log.NewSyncWriter(os.Stderr) -+ logger := log.NewLogfmtLogger(w) -+ -+ // Create fake promtail client -+ eh := fake.New(func() {}) -+ defer eh.Stop() -+ -+ serverConfig, port, err := getServerConfigWithAvailablePort() -+ require.NoError(t, err, ""error generating server config or finding open port"") -+ config := &scrapeconfig.HerokuDrainTargetConfig{ -+ Server: serverConfig, -+ Labels: nil, -+ UseIncomingTimestamp: true, -+ } -+ -+ prometheus.DefaultRegisterer = prometheus.NewRegistry() -+ metrics := NewMetrics(prometheus.DefaultRegisterer) -+ pt, err := NewTarget(metrics, logger, eh, ""test_job"", config, nil) -+ require.NoError(t, err) -+ defer func() { -+ _ = pt.Stop() -+ }() -+ -+ // Clear received lines after test case is ran -+ defer eh.Clear() -+ -+ req, err := makeDrainRequest(fmt.Sprintf(""http://%s:%d"", localhost, port), testLogLine1) -+ require.NoError(t, err, ""expected test drain request to be successfully created"") -+ res, err := http.DefaultClient.Do(req) -+ require.NoError(t, err) -+ require.Equal(t, http.StatusNoContent, res.StatusCode, ""expected no-content status code"") -+ -+ // Wait for them to appear in the test handler -+ countdown := 1000 -+ for len(eh.Received()) != 1 && countdown > 0 { -+ time.Sleep(1 * time.Millisecond) -+ countdown-- -+ } -+ -+ // Make sure we didn't timeout -+ require.Equal(t, 1, len(eh.Received())) -+ -+ expectedTs, err := time.Parse(time.RFC3339Nano, testLogLine1Timestamp) -+ require.NoError(t, err, ""expected expected timestamp to be parse correctly"") -+ require.Equal(t, expectedTs, eh.Received()[0].Timestamp, ""expected entry timestamp to be overridden by received one"") -+} -+ -+func TestHerokuDrainTarget_ErrorOnNotPrometheusCompatibleJobName(t *testing.T) { -+ w := log.NewSyncWriter(os.Stderr) -+ logger := log.NewLogfmtLogger(w) -+ -+ // Create fake 
promtail client -+ eh := fake.New(func() {}) -+ defer eh.Stop() -+ -+ serverConfig, _, err := getServerConfigWithAvailablePort() -+ require.NoError(t, err, ""error generating server config or finding open port"") -+ config := &scrapeconfig.HerokuDrainTargetConfig{ -+ Server: serverConfig, -+ Labels: nil, -+ UseIncomingTimestamp: true, -+ } -+ -+ prometheus.DefaultRegisterer = prometheus.NewRegistry() -+ metrics := NewMetrics(prometheus.DefaultRegisterer) -+ pt, err := NewTarget(metrics, logger, eh, ""test-job"", config, nil) -+ require.Error(t, err, ""expected an error from creating a heroku target with an invalid job name"") -+ // Cleanup target in the case test failed and target started correctly -+ if err == nil { -+ _ = pt.Stop() -+ } -+} -+ -+func getServerConfigWithAvailablePort() (cfg server.Config, port int, err error) { -+ // Get a randomly available port by open and closing a TCP socket -+ addr, err := net.ResolveTCPAddr(""tcp"", localhost+"":0"") -+ if err != nil { -+ return -+ } -+ l, err := net.ListenTCP(""tcp"", addr) -+ if err != nil { -+ return -+ } -+ port = l.Addr().(*net.TCPAddr).Port -+ err = l.Close() -+ if err != nil { -+ return -+ } -+ -+ // Adjust some of the defaults -+ cfg.RegisterFlags(flag.NewFlagSet(""empty"", flag.ContinueOnError)) -+ cfg.HTTPListenAddress = localhost -+ cfg.HTTPListenPort = port -+ cfg.GRPCListenAddress = localhost -+ cfg.GRPCListenPort = 0 // Not testing GRPC, a random port will be assigned -+ -+ return -+} -diff --git a/clients/pkg/promtail/targets/heroku/targetmanager.go b/clients/pkg/promtail/targets/heroku/targetmanager.go -new file mode 100644 -index 0000000000000..5d046ca3d45cf ---- /dev/null -+++ b/clients/pkg/promtail/targets/heroku/targetmanager.go -@@ -0,0 +1,75 @@ -+package heroku -+ -+import ( -+ ""github.com/go-kit/log"" -+ ""github.com/go-kit/log/level"" -+ ""github.com/prometheus/client_golang/prometheus"" -+ -+ ""github.com/grafana/loki/clients/pkg/logentry/stages"" -+ ""github.com/grafana/loki/clients/pkg/promtail/api"" -+ ""github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"" -+ ""github.com/grafana/loki/clients/pkg/promtail/targets/target"" -+) -+ -+type TargetManager struct { -+ logger log.Logger -+ targets map[string]*Target -+} -+ -+func NewHerokuDrainTargetManager( -+ metrics *Metrics, -+ reg prometheus.Registerer, -+ logger log.Logger, -+ client api.EntryHandler, -+ scrapeConfigs []scrapeconfig.Config) (*TargetManager, error) { -+ -+ tm := &TargetManager{ -+ logger: logger, -+ targets: make(map[string]*Target), -+ } -+ -+ for _, cfg := range scrapeConfigs { -+ pipeline, err := stages.NewPipeline(log.With(logger, ""component"", ""heroku_drain_pipeline_""+cfg.JobName), cfg.PipelineStages, &cfg.JobName, reg) -+ if err != nil { -+ return nil, err -+ } -+ -+ t, err := NewTarget(metrics, logger, pipeline.Wrap(client), cfg.JobName, cfg.HerokuDrainConfig, cfg.RelabelConfigs) -+ if err != nil { -+ return nil, err -+ } -+ -+ tm.targets[cfg.JobName] = t -+ } -+ -+ return tm, nil -+} -+ -+func (hm *TargetManager) Ready() bool { -+ for _, t := range hm.targets { -+ if t.Ready() { -+ return true -+ } -+ } -+ return false -+} -+ -+func (hm *TargetManager) Stop() { -+ for name, t := range hm.targets { -+ if err := t.Stop(); err != nil { -+ level.Error(t.logger).Log(""event"", ""failed to stop heroku drain target"", ""name"", name, ""cause"", err) -+ } -+ } -+} -+ -+func (hm *TargetManager) ActiveTargets() map[string][]target.Target { -+ return hm.AllTargets() -+} -+ -+func (hm *TargetManager) AllTargets() 
map[string][]target.Target { -+ res := make(map[string][]target.Target, len(hm.targets)) -+ for k, v := range hm.targets { -+ res[k] = []target.Target{v} -+ } -+ return res -+} -diff --git a/clients/pkg/promtail/targets/manager.go b/clients/pkg/promtail/targets/manager.go -index ef7d12504e90c..b2ea636c0e5a7 100644 ---- a/clients/pkg/promtail/targets/manager.go -+++ b/clients/pkg/promtail/targets/manager.go -@@ -16,6 +16,7 @@ import ( - ""github.com/grafana/loki/clients/pkg/promtail/targets/file"" - ""github.com/grafana/loki/clients/pkg/promtail/targets/gcplog"" - ""github.com/grafana/loki/clients/pkg/promtail/targets/gelf"" -+ ""github.com/grafana/loki/clients/pkg/promtail/targets/heroku"" - ""github.com/grafana/loki/clients/pkg/promtail/targets/journal"" - ""github.com/grafana/loki/clients/pkg/promtail/targets/kafka"" - ""github.com/grafana/loki/clients/pkg/promtail/targets/lokipush"" -@@ -37,6 +38,7 @@ const ( - CloudflareConfigs = ""cloudflareConfigs"" - DockerConfigs = ""dockerConfigs"" - DockerSDConfigs = ""dockerSDConfigs"" -+ HerokuDrainConfigs = ""herokuDrainConfigs"" - ) - - type targetManager interface { -@@ -96,6 +98,8 @@ func NewTargetManagers( - targetScrapeConfigs[CloudflareConfigs] = append(targetScrapeConfigs[CloudflareConfigs], cfg) - case cfg.DockerSDConfigs != nil: - targetScrapeConfigs[DockerSDConfigs] = append(targetScrapeConfigs[DockerSDConfigs], cfg) -+ case cfg.HerokuDrainConfig != nil: -+ targetScrapeConfigs[HerokuDrainConfigs] = append(targetScrapeConfigs[HerokuDrainConfigs], cfg) - default: - return nil, fmt.Errorf(""no valid target scrape config defined for %q"", cfg.JobName) - } -@@ -116,13 +120,14 @@ func NewTargetManagers( - } - - var ( -- fileMetrics *file.Metrics -- syslogMetrics *syslog.Metrics -- gcplogMetrics *gcplog.Metrics -- gelfMetrics *gelf.Metrics -- cloudflareMetrics *cloudflare.Metrics -- dockerMetrics *docker.Metrics -- journalMetrics *journal.Metrics -+ fileMetrics *file.Metrics -+ syslogMetrics *syslog.Metrics -+ gcplogMetrics *gcplog.Metrics -+ gelfMetrics *gelf.Metrics -+ cloudflareMetrics *cloudflare.Metrics -+ dockerMetrics *docker.Metrics -+ journalMetrics *journal.Metrics -+ herokuDrainMetrics *heroku.Metrics - ) - if len(targetScrapeConfigs[FileScrapeConfigs]) > 0 { - fileMetrics = file.NewMetrics(reg) -@@ -145,6 +150,9 @@ func NewTargetManagers( - if len(targetScrapeConfigs[JournalScrapeConfigs]) > 0 { - journalMetrics = journal.NewMetrics(reg) - } -+ if len(targetScrapeConfigs[HerokuDrainConfigs]) > 0 { -+ herokuDrainMetrics = heroku.NewMetrics(reg) -+ } - - for target, scrapeConfigs := range targetScrapeConfigs { - switch target { -@@ -214,6 +222,12 @@ func NewTargetManagers( - return nil, errors.Wrap(err, ""failed to make Loki Push API target manager"") - } - targetManagers = append(targetManagers, pushTargetManager) -+ case HerokuDrainConfigs: -+ herokuDrainTargetManager, err := heroku.NewHerokuDrainTargetManager(herokuDrainMetrics, reg, logger, client, scrapeConfigs) -+ if err != nil { -+ return nil, errors.Wrap(err, ""failed to make Heroku drain target manager"") -+ } -+ targetManagers = append(targetManagers, herokuDrainTargetManager) - case WindowsEventsConfigs: - windowsTargetManager, err := windows.NewTargetManager(reg, logger, client, scrapeConfigs) - if err != nil { -diff --git a/clients/pkg/promtail/targets/target/target.go b/clients/pkg/promtail/targets/target/target.go -index ecb021c69ff90..5e8ad465aa778 100644 ---- a/clients/pkg/promtail/targets/target/target.go -+++ b/clients/pkg/promtail/targets/target/target.go -@@ 
-41,6 +41,9 @@ const ( - - // DockerTargetType is a Docker target - DockerTargetType = TargetType(""Docker"") -+ -+ // HerokuDrainTargetType is a Heroku Logs target -+ HerokuDrainTargetType = TargetType(""HerokuDrain"") - ) - - // Target is a promtail scrape target -diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md -index 1cbb8e0517bb6..d9370b74e6e2c 100644 ---- a/docs/sources/clients/promtail/configuration.md -+++ b/docs/sources/clients/promtail/configuration.md -@@ -339,6 +339,9 @@ job_name: - # Configuration describing how to pull logs from Cloudflare. - [cloudflare: ] - -+# Configuration describing how to pull logs from a Heroku LogPlex drain. -+[heroku_drain: ] -+ - # Describes how to relabel targets to determine if they should - # be processed. - relabel_configs: -@@ -1194,6 +1197,43 @@ All Cloudflare logs are in JSON. Here is an example: - - You can leverage [pipeline stages](pipeline_stages) if, for example, you want to parse the JSON log line and extract more labels or change the log line format. - -+### heroku_drain -+ -+The `heroku_drain` block configures Promtail to expose a [Heroku HTTPS Drain](https://devcenter.heroku.com/articles/log-drains#https-drains). -+ -+Each job configured with a Heroku Drain will expose a Drain and will require a separate port. -+ -+The `server` configuration is the same as [server](#server), since Promtail exposes an HTTP server for each new drain. -+ -+Promtail exposes an endpoint at `/heroku/api/v1/drain`, which expects requests from Heroku's log delivery. -+ -+```yaml -+# The Heroku drain server configuration options -+[server: ] -+ -+# Label map to add to every log message. -+labels: -+ [ : ... ] -+ -+# Whether Promtail should pass on the timestamp from the incoming Heroku drain message. -+# When false, or if no timestamp is present in the syslog message, Promtail will assign the current -+# timestamp to the log when it was processed. -+[use_incoming_timestamp: | default = false] -+ -+``` -+ -+#### Available Labels -+ -+Heroku Log drains send logs in [Syslog-formatted messages](https://datatracker.ietf.org/doc/html/rfc5424#section-6) (with -+some [minor tweaks](https://devcenter.heroku.com/articles/log-drains#https-drain-caveats); they are not RFC-compatible). -+ -+The Heroku Drain target exposes for each log entry the received syslog fields with the following labels: -+ -+- `__heroku_drain_host`: The [HOSTNAME](https://tools.ietf.org/html/rfc5424#section-6.2.4) field parsed from the message. -+- `__heroku_drain_app`: The [APP-NAME](https://tools.ietf.org/html/rfc5424#section-6.2.5) field parsed from the message. -+- `__heroku_drain_proc`: The [PROCID](https://tools.ietf.org/html/rfc5424#section-6.2.6) field parsed from the message. -+- `__heroku_drain_log_id`: The [MSGID](https://tools.ietf.org/html/rfc5424#section-6.2.7) field parsed from the message. -+ - ### relabel_configs - - Relabeling is a powerful tool to dynamically rewrite the label set of a target -diff --git a/docs/sources/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md -index c530a57869b3d..e0df0fa083461 100644 ---- a/docs/sources/clients/promtail/scraping.md -+++ b/docs/sources/clients/promtail/scraping.md -@@ -379,6 +379,52 @@ scrape_configs: - Only `api_token` and `zone_id` are required. - Refer to the [Cloudfare](../../configuration/#cloudflare) configuration section for details. 
-
-+## Heroku Drain
-+Promtail supports receiving logs from a Heroku application by using a [Heroku HTTPS Drain](https://devcenter.heroku.com/articles/log-drains#https-drains).
-+Configuration is specified in a `heroku_drain` block within the Promtail `scrape_config` configuration.
-+
-+```yaml
-+- job_name: heroku_drain
-+ heroku_drain:
-+ server:
-+ http_listen_address: 0.0.0.0
-+ http_listen_port: 8080
-+ labels:
-+ job: heroku_drain_docs
-+ use_incoming_timestamp: true
-+ relabel_configs:
-+ - source_labels: ['__heroku_drain_host']
-+ target_label: 'host'
-+ - source_labels: ['__heroku_drain_app']
-+ target_label: 'app'
-+ - source_labels: ['__heroku_drain_proc']
-+ target_label: 'proc'
-+ - source_labels: ['__heroku_drain_log_id']
-+ target_label: 'log_id'
-+```
-+Within the `scrape_configs` configuration for a Heroku Drain target, the `job_name` must be a Prometheus-compatible [metric name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
-+
-+The [server](../configuration.md#server) section configures the HTTP server created for receiving logs.
-+`labels` defines a static set of label values added to each received log entry. `use_incoming_timestamp` can be used to pass
-+the timestamp received from Heroku.
-+
-+Before using a `heroku_drain` target, Heroku should be configured with the URL where the Promtail instance will be listening.
-+Follow the steps in [Heroku HTTPS Drain docs](https://devcenter.heroku.com/articles/log-drains#https-drains) for using the Heroku CLI
-+with a command like the following:
-+
-+```
-+heroku drains:add [http|https]://HOSTNAME:8080/heroku/api/v1/drain -a HEROKU_APP_NAME
-+```
-+
-+The `heroku_drain` target also supports `relabel_configs` and `pipeline_stages`, just like other targets.
-+
-+When Promtail receives Heroku Drain logs, various internal labels are made available for [relabeling](#relabeling):
-+- `__heroku_drain_host`
-+- `__heroku_drain_app`
-+- `__heroku_drain_proc`
-+- `__heroku_drain_log_id`
-+In the example above, these internal labels are mapped to the `host`, `app`, `proc`, and `log_id` labels through `relabel_configs`.
-+
- ## Relabeling
- 
- Each `scrape_configs` entry can contain a `relabel_configs` stanza. 
-diff --git a/go.mod b/go.mod -index c8f3e12c9846f..075a2d9221161 100644 ---- a/go.mod -+++ b/go.mod -@@ -114,6 +114,7 @@ require ( - - require ( - github.com/grafana/groupcache_exporter v0.0.0-20220629095919-59a8c6428a43 -+ github.com/heroku/x v0.0.50 - github.com/mailgun/groupcache/v2 v2.3.2 - github.com/prometheus/alertmanager v0.24.0 - github.com/prometheus/common/sigv4 v0.1.0 -diff --git a/go.sum b/go.sum -index 5b6855026e183..b2b16c1aced7a 100644 ---- a/go.sum -+++ b/go.sum -@@ -65,6 +65,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 - cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g= - code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= - collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -+contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= - dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= - github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= - github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= -@@ -177,6 +178,7 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx - github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae h1:ePgznFqEG1v3AjMklnK8H7BSc++FDSo7xfK9K7Af+0Y= - github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= - github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= - github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= - github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= - github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -@@ -214,6 +216,7 @@ github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD - github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= - github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= - github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -+github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= - github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= - github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= - github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -@@ -227,6 +230,8 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= - github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -+github.com/aws/aws-lambda-go v1.27.0/go.mod h1:jJmlefzPfGnckuHdXX7/80O3BvUUi12XOkbv4w9SGLU= -+github.com/aws/aws-sdk-go v1.13.10/go.mod 
h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= - github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= - github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= - github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -@@ -243,6 +248,7 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw - github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= - github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -+github.com/axiomhq/hyperloglog v0.0.0-20180317131949-fe9507de0228/go.mod h1:IOXAcuKIFq/mDyuQ4wyJuJ79XLMsmLM+5RdQ+vWrL7o= - github.com/baidubce/bce-sdk-go v0.9.81 h1:n8KfThLG9fvGv3A+RtTt/jKhg/FPPRpo+iNnS2r+iPI= - github.com/baidubce/bce-sdk-go v0.9.81/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= - github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -@@ -310,6 +316,7 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe - github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= - github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= - github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -+github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= - github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= - github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= - github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -@@ -369,6 +376,7 @@ github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8l - github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= - github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= - github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= -+github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= - github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -@@ -489,10 +497,12 @@ github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmC - github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= - github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= - github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -+github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= - github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= - 
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= - github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= - github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -+github.com/go-ini/ini v1.33.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= - github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= - github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= - github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -@@ -517,6 +527,8 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV - github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= - github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= - github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -+github.com/go-ole/go-ole v1.2.6-0.20210915003542-8b1f7f90f6b1/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= - github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= - github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= - github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -@@ -742,6 +754,7 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW - github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= - github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= - github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -+github.com/gomodule/redigo v1.8.1/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= - github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= - github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= - github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -@@ -773,6 +786,7 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ - github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= - github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= - github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -+github.com/google/gops v0.3.22/go.mod h1:7diIdLsqpCihPSX3fQagksT/Ku/y4RL9LHTlKyEUDl8= - github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= - github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= - github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -@@ -857,6 +871,7 @@ github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c/go.mod h1:GIMXMPB/lRA - github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= - github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= - github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= 
-+github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= - github.com/grpc-ecosystem/go-grpc-middleware/providers/kit/v2 v2.0.0-20201002093600-73cf2ae9d891/go.mod h1:516cTXxZzi4NBUBbKcwmO4Eqbb6GHAEd3o4N+GYyCBY= -@@ -864,6 +879,7 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbf - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 h1:guQyUpELu4I0wKgdsRBZDA5blfGiUleuppRSVy9Qbi0= - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k= - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -+github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= - github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= - github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -@@ -955,6 +971,9 @@ github.com/hashicorp/vault v0.10.3/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bA - github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190318174639-195e0e9d07f1/go.mod h1:VJHHT2SC1tAPrfENQeBhLlb5FbZoKZM+oC/ROmEftz0= - github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= - github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -+github.com/heroku/rollrus v0.2.0/go.mod h1:B3MwEcr9nmf4xj0Sr5l9eSht7wLKMa1C+9ajgAU79ek= -+github.com/heroku/x v0.0.50 h1:CA0AXkSumucVJD+T+x+6c7X1iDEb+40F8GNgH5UjJwo= -+github.com/heroku/x v0.0.50/go.mod h1:vr+jORZ6sG3wgEq2FAS6UbOUrz9/DxpQGN/xPHVgbSM= - github.com/hetznercloud/hcloud-go v1.33.2 h1:ptWKVYLW7YtjXzsqTFKFxwpVo3iM9UMkVPBYQE4teLU= - github.com/hetznercloud/hcloud-go v1.33.2/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME= - github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= -@@ -962,6 +981,7 @@ github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs - github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= - github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= - github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= -+github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= - github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= - github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= - github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -@@ -1009,6 +1029,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y - github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= - github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= - 
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -+github.com/joeshaw/envdecode v0.0.0-20180129163420-d5f34bca07f3/go.mod h1:Q+alOFAXgW5SrcfMPt/G4B2oN+qEcQRJjkn/f4mKL04= - github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= - github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= - github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -@@ -1048,6 +1069,7 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh - github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= - github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= - github.com/keybase/go-crypto v0.0.0-20180614160407-5114a9a81e1b/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= -+github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= - github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= - github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= - github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -@@ -1087,6 +1109,7 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ - github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= - github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= - github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= -+github.com/leesper/go_rng v0.0.0-20171009123644-5344a9259b21/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= - github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= - github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= - github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -@@ -1100,6 +1123,7 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U - github.com/linode/linodego v1.6.0 h1:y3KgXttj0v6V3HyGtsvdkTl0gIzaAAOdrDXCIwGeh2g= - github.com/linode/linodego v1.6.0/go.mod h1:9lmhBsOupR6ke7D9Ioj1bq/ny9pfgFkCLiX7ubq0r08= - github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= -+github.com/lstoll/grpce v1.7.0/go.mod h1:XiCWl3R+avNCT7KsTjv3qCblgsSqd0SC4ymySrH226g= - github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= - github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= - github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -@@ -1322,6 +1346,7 @@ github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8 - github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= - github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= - github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -+github.com/pkg/errors v0.8.2-0.20190227000051-27936f6d90f9/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= - github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= - github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= - github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -@@ -1397,6 +1422,8 @@ github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0 - github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= - github.com/prometheus/prometheus v0.36.2-0.20220613200027-59727ab0eb48 h1:b0KPmtNBsvMIfW6fryLG8yRSj3Ye3uHdQEyoTJmO/wM= - github.com/prometheus/prometheus v0.36.2-0.20220613200027-59727ab0eb48/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= -+github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= -+github.com/rcrowley/go-metrics v0.0.0-20160613154715-cfa5a85e9f0a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -@@ -1408,6 +1435,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L - github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= - github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= - github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -+github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= -+github.com/rollbar/rollbar-go v1.2.0/go.mod h1:czC86b8U4xdUH7W2C6gomi2jutLm8qK0OtrF5WMvpcc= - github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= - github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= - github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= -@@ -1434,6 +1463,7 @@ github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJP - github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= - github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= - github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -+github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ= - github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= - github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= - github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -@@ -1458,6 +1488,7 @@ github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1 - github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= - github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= - github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= -+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod 
h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= - github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= - github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= - github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= -@@ -1466,6 +1497,7 @@ github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5Q - github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= - github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= - github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= -+github.com/soveran/redisurl v0.0.0-20180322091936-eb325bc7a4b8/go.mod h1:FVJ8jbHu7QrNFs3bZEsv/L5JjearIAY9N0oXh2wk+6Y= - github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= - github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= - github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -@@ -1474,6 +1506,7 @@ github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= - github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= - github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= - github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -+github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= - github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= - github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= - github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -@@ -1517,6 +1550,8 @@ github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1C - github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= - github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= - github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= -+github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -+github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= - github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= - github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 h1:hbyjqt5UnyKeOT3rFVxLxi7iTI6XqR2p4TkwEAQdUiw= - github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:Q5IRRDY+cjIaiOjTAnXN5LKQV5MPqVx5ofQn85Jy5Yw= -@@ -1534,9 +1569,12 @@ github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= - github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= - github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= - github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -+github.com/unrolled/secure v1.0.1/go.mod h1:R6rugAuzh4TQpbFAq69oqZggyBQxFRFQIewtz5z7Jsc= - github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -+github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ= - github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= - github.com/urfave/cli 
v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -+github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= - github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= - github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= - github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -@@ -1616,6 +1654,7 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= - go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= - go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= - go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -+go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= - go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= - go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= - go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -@@ -1624,18 +1663,38 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= - go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 h1:mac9BKRqwaX6zxHPDe3pvmWpwuuIM0vuXv2juCnQevE= - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= -+go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I= -+go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= - go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= - go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04= -+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.22.0/go.mod h1:gIp6+vQxqmh6Vd/mucqnsaFpOuVycQAS/BBXMKzJk0w= -+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0/go.mod h1:pe9oOWRaZyapdajWCn64fnl76v3cmTEmNBgh7MkKvwE= -+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0/go.mod h1:aSP5oMNaAfOYq+sRydHANZ0vBYLyZR/3lR9pru9aPLk= -+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.22.0/go.mod h1:FX3Hvv/Hwb4WtlrAvco4QCsfxsaqg/UoYA6WFCnT1Ro= - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE= - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o= - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0/go.mod h1:aFXT9Ng2seM9eizF+LfKiyPBGy8xIZKwhusC1gIu3hA= -+go.opentelemetry.io/otel/internal/metric v0.22.0/go.mod h1:7qVuMihW/ktMonEfOvBXuh6tfMvvEyoIDgeJNRloYbQ= -+go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= -+go.opentelemetry.io/otel/metric v0.22.0/go.mod h1:KcsUkBiYGW003DJ+ugd2aqIRIfjabD9jeOUXqsAtrq0= -+go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= - go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c= - go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= -+go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod 
h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4= -+go.opentelemetry.io/otel/sdk v1.0.0-RC1/go.mod h1:kj6yPn7Pgt5ByRuwesbaWcRLA+V7BSDg3Hf8xRvsvf8= -+go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= - go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= -+go.opentelemetry.io/otel/sdk/export/metric v0.22.0/go.mod h1:a14rf2CiHSn9xjB6cHuv0HoZGl5C4w2PAgl+Lja1VzU= -+go.opentelemetry.io/otel/sdk/export/metric v0.23.0/go.mod h1:SuMiREmKVRIwFKq73zvGTvwFpxb/ZAYkMfyqMoOtDqs= -+go.opentelemetry.io/otel/sdk/metric v0.22.0/go.mod h1:LzkI0G0z6KhEagqmzgk3bw/dglE2Tk2OXs455UMcI0s= -+go.opentelemetry.io/otel/sdk/metric v0.23.0/go.mod h1:wa0sKK13eeIFW+0OFjcC3S1i7FTRRiLAXe1kjBVbhwg= -+go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg= -+go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= - go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= - go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= - go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -+go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= - go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= - go.starlark.net v0.0.0-20200901195727-6e684ef5eeee/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU= - go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -@@ -1753,6 +1812,7 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r - golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= - golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= - golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= - golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= - golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= - golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -@@ -1893,12 +1953,15 @@ golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7w - golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -+golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -+golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= - golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -@@ -1963,14 +2026,17 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc - golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -+golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -+golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -+golang.org/x/sys v0.0.0-20211102061401-a2f17f7b995c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -@@ -2101,6 +2167,7 @@ golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNq - golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= - golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= - gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -+gonum.org/v1/gonum v0.0.0-20190502212712-4a2eb0188cbc/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= - gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= - gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= - gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -@@ -2160,12 +2227,14 @@ google.golang.org/appengine v1.6.7 
h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 - google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= - google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -+google.golang.org/genproto v0.0.0-20181221175505-bd9b4fb69e2f/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= - google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= - google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= - google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= - google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= - google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= - google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -+google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= - google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= - google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -@@ -2247,6 +2316,7 @@ google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMR - google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= - google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -+google.golang.org/grpc/examples v0.0.0-20210916203835-567da6b86340/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= - google.golang.org/grpc/examples v0.0.0-20211119005141-f45e61797429/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= - google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= - google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -@@ -2267,6 +2337,7 @@ gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4 - gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= - gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= - gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= -+gopkg.in/caio/go-tdigest.v2 v2.3.0/go.mod h1:HPfh/CLN8UWDMOC76lqxVeKa5E24ypoVuTj4BLMb9cU= - gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= - gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -@@ -2290,6 +2361,7 @@ gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWd - gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod 
h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= - gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= - gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -+gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= - gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= - gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= - gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -@@ -2369,6 +2441,7 @@ modernc.org/sqlite v1.7.4/go.mod h1:xse4RHCm8Fzw0COf5SJqAyiDrVeDwAQthAS1V/woNIA= - modernc.org/tcl v1.4.1/go.mod h1:8YCvzidU9SIwkz7RZwlCWK61mhV8X9UwfkRDRp7y5e0= - rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= - rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -+rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo= - rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= - rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= - rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -diff --git a/vendor/github.com/heroku/x/LICENSE.txt b/vendor/github.com/heroku/x/LICENSE.txt -new file mode 100644 -index 0000000000000..f6c9ce3e1de02 ---- /dev/null -+++ b/vendor/github.com/heroku/x/LICENSE.txt -@@ -0,0 +1,27 @@ -+Copyright (c) 2018, Salesforce.com, Inc. -+All rights reserved. -+ -+Redistribution and use in source and binary forms, with or without modification, -+are permitted provided that the following conditions are met: -+ -+* Redistributions of source code must retain the above copyright notice, this -+list of conditions and the following disclaimer. -+ -+* Redistributions in binary form must reproduce the above copyright notice, this -+list of conditions and the following disclaimer in the documentation and/or -+other materials provided with the distribution. -+ -+* Neither the name of Salesforce.com nor the names of its contributors may be -+used to endorse or promote products derived from this software without specific -+prior written permission. -+ -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS"" AND -+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -\ No newline at end of file -diff --git a/vendor/github.com/heroku/x/logplex/encoding/README.md b/vendor/github.com/heroku/x/logplex/encoding/README.md -new file mode 100644 -index 0000000000000..f4c069f20a8c5 ---- /dev/null -+++ b/vendor/github.com/heroku/x/logplex/encoding/README.md -@@ -0,0 +1,47 @@ -+# logplex/encoding -+ -+## What's this? -+ -+A set of libraries we use to parse messages, and to also publish these same -+syslog RFC5424 messages. -+ -+## How to use? -+ -+We have 2 scanners available. 
If you're trying to build a logplex compatible ingress, -+you can use the regular scanner. -+ -+### Scanner -+ -+```go -+func handler(w http.ResponseWriter, r *http.Request) { -+ s := NewScanner(r.Body) -+ -+ for s.Scan() { -+ log.Printf(""%+v"", scanner.Message()) -+ } -+ -+ if s.Err() != nil { -+ log.Printf(""err: %v"", s.Err()) -+ } -+} -+``` -+ -+### DrainScanner -+ -+If the intent is to write an application which acts as a heroku drain, -+then using the DrainScanner is preferrable -- primarily because it doesn't -+require structured data. -+ -+``` -+func handler(w http.ResponseWriter, r *http.Request) { -+ s := NewDrainScanner(r.Body) -+ -+ for s.Scan() { -+ log.Printf(""%+v"", scanner.Message()) -+ } -+ -+ if s.Err() != nil { -+ log.Printf(""err: %v"", s.Err()) -+ } -+} -+``` -diff --git a/vendor/github.com/heroku/x/logplex/encoding/encoder.go b/vendor/github.com/heroku/x/logplex/encoding/encoder.go -new file mode 100644 -index 0000000000000..008319a70506f ---- /dev/null -+++ b/vendor/github.com/heroku/x/logplex/encoding/encoder.go -@@ -0,0 +1,126 @@ -+package encoding -+ -+import ( -+ ""fmt"" -+ ""io"" -+ ""strconv"" -+ -+ ""github.com/pkg/errors"" -+) -+ -+// SyslogTimeFormat defines the exact time format used in our logs. -+const SyslogTimeFormat = ""2006-01-02T15:04:05.999999-07:00"" -+ -+// FlexibleSyslogTimeFormat accepts both 'Z' and TZ notation for event time. -+const FlexibleSyslogTimeFormat = ""2006-01-02T15:04:05.999999Z07:00"" -+ -+// HumanTimeFormat defines the human friendly time format used in CLI/UI. -+const HumanTimeFormat = ""2006-01-02T15:04:05.000000-07:00"" -+ -+// L15Error is the message returned with an L15 error -+const L15Error = ""L15: Error displaying log lines. Please try again."" -+ -+// ErrInvalidMessage returned when trying to encode an invalid syslog message -+var ErrInvalidMessage = errors.New(""invalid message"") -+ -+// Encoder abstracts away how messages are written out -+type Encoder interface { -+ Encode(msg Message) error -+ KeepAlive() error -+} -+ -+type plainEncoder struct { -+ w io.Writer -+} -+ -+// NewPlain creates a plain encoder. It dumps the log message directly -+// without massaging it -+func NewPlain(w io.Writer) Encoder { -+ return &plainEncoder{w} -+} -+ -+// Encode writes the message as-is -+func (p *plainEncoder) Encode(msg Message) error { -+ _, err := p.w.Write([]byte(messageToString(msg))) -+ return err -+} -+ -+// KeepAlive sends a null byte. -+func (p *plainEncoder) KeepAlive() error { -+ _, err := p.w.Write([]byte{0}) -+ return err -+} -+ -+// sseEncoder wraps an io.Writer and provides convenience methods for SSE -+type sseEncoder struct { -+ w io.Writer -+} -+ -+// NewSSE instantiates a new SSE encoder -+func NewSSE(w io.Writer) Encoder { -+ return &sseEncoder{w} -+} -+ -+// KeepAlive sends a blank comment. 
-+func (s *sseEncoder) KeepAlive() error { -+ _, err := fmt.Fprintf(s.w, "": \n"") -+ return err -+} -+ -+// Encode assembles the message according to the SSE spec and writes it out -+func (s *sseEncoder) Encode(msg Message) error { -+ // Use time as the base for creating an ID, since we need monotonic numbers that we can potentially do offsets from -+ s.id(msg.Timestamp.Unix()) -+ s.data(msg) -+ s.separator() -+ return nil -+} -+ -+func (s *sseEncoder) id(id int64) { -+ fmt.Fprintf(s.w, ""id: %v\n"", id) -+} -+ -+func (s *sseEncoder) data(msg Message) { -+ fmt.Fprint(s.w, ""data: "") -+ fmt.Fprint(s.w, messageToString(msg)) -+ fmt.Fprint(s.w, ""\n"") -+} -+ -+func (s *sseEncoder) separator() { -+ fmt.Fprint(s.w, ""\n\n"") -+} -+ -+func messageToString(msg Message) string { -+ return msg.Timestamp.Format(HumanTimeFormat) + "" "" + msg.Application + ""["" + msg.Process + ""]: "" + msg.Message -+} -+ -+// Encode serializes a syslog message into their wire format ( octet-framed syslog ) -+// Disabling RFC 5424 compliance is the default and needed due to https://github.com/heroku/logplex/issues/204 -+func Encode(msg Message) ([]byte, error) { -+ sd := """" -+ if msg.RFCCompliant { -+ sd = ""- "" -+ } -+ -+ if msg.Version == 0 { -+ return nil, errors.Wrap(ErrInvalidMessage, ""version"") -+ } -+ -+ line := ""<"" + strconv.Itoa(int(msg.Priority)) + "">"" + strconv.Itoa(int(msg.Version)) + "" "" + -+ msg.Timestamp.Format(SyslogTimeFormat) + "" "" + -+ stringOrNil(msg.Hostname) + "" "" + -+ stringOrNil(msg.Application) + "" "" + -+ stringOrNil(msg.Process) + "" "" + -+ stringOrNil(msg.ID) + "" "" + -+ sd + -+ msg.Message -+ -+ return []byte(strconv.Itoa(len(line)) + "" "" + line), nil -+} -+ -+func stringOrNil(s string) string { -+ if s == """" { -+ return ""-"" -+ } -+ return s -+} -diff --git a/vendor/github.com/heroku/x/logplex/encoding/message.go b/vendor/github.com/heroku/x/logplex/encoding/message.go -new file mode 100644 -index 0000000000000..dee56f7054f41 ---- /dev/null -+++ b/vendor/github.com/heroku/x/logplex/encoding/message.go -@@ -0,0 +1,40 @@ -+package encoding -+ -+import ( -+ ""io"" -+ ""time"" -+) -+ -+// Message is a syslog message -+type Message struct { -+ Timestamp time.Time -+ Hostname string -+ Application string -+ Process string -+ ID string -+ Message string -+ Version uint16 -+ Priority uint8 -+ RFCCompliant bool -+} -+ -+// Size returns the message size in bytes, including the octet framing header -+func (m Message) Size() (int, error) { -+ b, err := Encode(m) -+ if err != nil { -+ return 0, err -+ } -+ -+ return len(b), nil -+} -+ -+// WriteTo writes the message to a stream -+func (m Message) WriteTo(w io.Writer) (int64, error) { -+ b, err := Encode(m) -+ if err != nil { -+ return 0, err -+ } -+ -+ i, err := w.Write(b) -+ return int64(i), err -+} -diff --git a/vendor/github.com/heroku/x/logplex/encoding/parser.go b/vendor/github.com/heroku/x/logplex/encoding/parser.go -new file mode 100644 -index 0000000000000..448158351a6fa ---- /dev/null -+++ b/vendor/github.com/heroku/x/logplex/encoding/parser.go -@@ -0,0 +1,58 @@ -+package encoding -+ -+import ( -+ ""bufio"" -+ ""bytes"" -+ ""strconv"" -+ -+ ""github.com/pkg/errors"" -+) -+ -+// SyslogSplitFunc splits the data based on the defined length prefix. 
-+// format: -+//nolint:lll -+// 64 <190>1 2019-07-20T17:50:10.879238Z shuttle token shuttle - - 99\n65 <190>1 2019-07-20T17:50:10.879238Z shuttle token shuttle - - 100\n -+// ^ frame size ^ boundary -+func SyslogSplitFunc(data []byte, atEOF bool) (advance int, token []byte, err error) { -+ // first space gives us the frame size -+ sp := bytes.IndexByte(data, ' ') -+ if sp == -1 { -+ if atEOF && len(data) > 0 { -+ return 0, nil, errors.Wrap(ErrBadFrame, ""missing frame length"") -+ } -+ return 0, nil, nil -+ } -+ -+ if sp == 0 { -+ return 0, nil, errors.Wrap(ErrBadFrame, ""invalid frame length"") -+ } -+ -+ msgSize, err := strconv.ParseUint(string(data[0:sp]), 10, 64) -+ if err != nil { -+ return 0, nil, errors.Wrap(ErrBadFrame, ""couldnt parse frame length"") -+ } -+ -+ // 1 here is the 'space' itself, used in the framing above -+ dataBoundary := sp + int(msgSize) + 1 -+ -+ if dataBoundary > len(data) { -+ if atEOF { -+ return 0, nil, errors.Wrapf(ErrBadFrame, ""message boundary (%d) not respected length (%d)"", dataBoundary, len(data)) -+ } -+ return 0, nil, nil -+ } -+ -+ return dataBoundary, data[sp+1 : dataBoundary], nil -+} -+ -+// TruncatingSyslogSplitFunc enforces a maximum line length after parsing. -+func TruncatingSyslogSplitFunc(maxLength int) bufio.SplitFunc { -+ return func(data []byte, atEOF bool) (advance int, token []byte, err error) { -+ advance, token, err = SyslogSplitFunc(data, atEOF) -+ if len(token) > maxLength { -+ token = token[0:maxLength] -+ } -+ -+ return -+ } -+} -diff --git a/vendor/github.com/heroku/x/logplex/encoding/scanner.go b/vendor/github.com/heroku/x/logplex/encoding/scanner.go -new file mode 100644 -index 0000000000000..d54f4478c7a4c ---- /dev/null -+++ b/vendor/github.com/heroku/x/logplex/encoding/scanner.go -@@ -0,0 +1,264 @@ -+package encoding -+ -+import ( -+ ""bufio"" -+ ""bytes"" -+ ""io"" -+ ""regexp"" -+ ""strconv"" -+ ""time"" -+ -+ ""github.com/pkg/errors"" -+) -+ -+const ( -+ // MaxFrameLength is the maximum message size to parse -+ MaxFrameLength = 10240 -+ -+ // OptimalFrameLength is the initial buffer size for scanning -+ OptimalFrameLength = 1024 -+ -+ defaultRfcCompliance = true -+) -+ -+var ( -+ // ErrBadFrame is returned when the scanner cannot parse syslog message boundaries -+ ErrBadFrame = errors.New(""bad frame"") -+ -+ // ErrInvalidStructuredData is returned when structure data has any value other than '-' (blank) -+ ErrInvalidStructuredData = errors.New(""invalid structured data"") -+ -+ // ErrInvalidPriVal is returned when pri-val is not properly formatted -+ ErrInvalidPriVal = errors.New(""invalid pri-val"") -+ -+ privalVersionRe = regexp.MustCompile(`<(\d+)>(\d)`) -+) -+ -+// Decode converts a rfc5424 message to our model -+func Decode(raw []byte, hasStructuredData bool) (Message, error) { -+ msg := Message{} -+ -+ b := bytes.NewBuffer(raw) -+ priVal, err := syslogField(b) -+ if err != nil { -+ return msg, err -+ } -+ -+ privalVersion := privalVersionRe.FindAllSubmatch(priVal, -1) -+ if len(privalVersion) != 1 || len(privalVersion[0]) != 3 { -+ return msg, ErrInvalidPriVal -+ } -+ prio, err := strconv.ParseUint(string(privalVersion[0][1]), 10, 8) -+ if err != nil { -+ return msg, err -+ } -+ msg.Priority = uint8(prio) -+ -+ version, err := strconv.ParseUint(string(privalVersion[0][2]), 10, 16) -+ if err != nil { -+ return msg, err -+ } -+ msg.Version = uint16(version) -+ -+ rawTime, err := syslogField(b) -+ if err != nil { -+ return msg, err -+ } -+ msg.Timestamp, err = time.Parse(FlexibleSyslogTimeFormat, string(rawTime)) 
-+ if err != nil { -+ return msg, err -+ } -+ -+ hostname, err := syslogField(b) -+ if err != nil { -+ return msg, err -+ } -+ msg.Hostname = string(hostname) -+ -+ application, err := syslogField(b) -+ if err != nil { -+ return msg, err -+ } -+ msg.Application = string(application) -+ -+ process, err := syslogField(b) -+ if err != nil { -+ return msg, err -+ } -+ msg.Process = string(process) -+ -+ id, err := syslogField(b) -+ if err != nil { -+ return msg, err -+ } -+ msg.ID = string(id) -+ -+ if hasStructuredData { -+ // trash structured data, as we don't use it ever -+ if err = trashStructuredData(b); err != nil { -+ return msg, err -+ } -+ } -+ -+ msg.Message = b.String() -+ -+ return msg, nil -+} -+ -+// syslogScanner is a octet-frame syslog parser -+type syslogScanner struct { -+ parser *bufio.Scanner -+ item Message -+ err error -+ rfcCompliant bool -+} -+ -+// Scanner is the general purpose primitive for parsing message bodies coming -+// from log-shuttle, logfwd, logplex and all sorts of logging components. -+type Scanner interface { -+ Scan() bool -+ Err() error -+ Message() Message -+} -+ -+type ScannerOption func(*syslogScanner) -+ -+func WithBuffer(optimalFrameLength, maxFrameLength int) ScannerOption { -+ return func(s *syslogScanner) { -+ s.parser.Buffer(make([]byte, optimalFrameLength), maxFrameLength) -+ } -+} -+ -+func WithSplit(splitFunc bufio.SplitFunc) ScannerOption { -+ return func(s *syslogScanner) { -+ s.parser.Split(splitFunc) -+ } -+} -+ -+func RFCCompliant(compliant bool) ScannerOption { -+ return func(s *syslogScanner) { -+ s.rfcCompliant = compliant -+ } -+} -+ -+// NewScanner is a syslog octet frame stream parser -+func NewScanner(r io.Reader, opts ...ScannerOption) Scanner { -+ s := &syslogScanner{ -+ parser: bufio.NewScanner(r), -+ } -+ -+ // ensure some defaults are set -+ s.rfcCompliant = defaultRfcCompliance -+ s.parser.Buffer(make([]byte, OptimalFrameLength), MaxFrameLength) -+ s.parser.Split(SyslogSplitFunc) -+ -+ // allow customization of Buffer and Split -+ for _, opt := range opts { -+ opt(s) -+ } -+ -+ return s -+} -+ -+// Message returns the current message -+func (s *syslogScanner) Message() Message { -+ return s.item -+} -+ -+// Err returns the last scanner error -+func (s *syslogScanner) Err() error { -+ if err := s.parser.Err(); err != nil { -+ return err -+ } -+ -+ return s.err -+} -+ -+// Scan returns true until all messages are parsed or an error occurs. -+// When an error occur, the underlying error will be presented as `Err()` -+func (s *syslogScanner) Scan() bool { -+ if !s.parser.Scan() { -+ return false -+ } -+ -+ s.item, s.err = Decode(s.parser.Bytes(), s.rfcCompliant) -+ return s.err == nil -+} -+ -+// NewDrainScanner returns a scanner for use with drain endpoints. The primary -+// difference is that it's loose and doesn't check for structured data. -+func NewDrainScanner(r io.Reader, opts ...ScannerOption) Scanner { -+ opts = append(opts, RFCCompliant(false)) -+ return NewScanner(r, opts...) 
-+} -+ -+func syslogField(b *bytes.Buffer) ([]byte, error) { -+ g, err := b.ReadBytes(' ') -+ if err != nil { -+ return nil, err -+ } -+ if len(g) > 0 { -+ g = g[:len(g)-1] -+ } -+ return g, nil -+} -+ -+func trashStructuredData(b *bytes.Buffer) error { -+ // notice the quoting -+ // [meta sequenceId=\""518\""][meta somethingElse=\""bl\]ah\""] -+ firstChar, err := b.ReadByte() -+ if err != nil { -+ return err -+ } -+ -+ if firstChar == '-' { -+ // trash the following space too -+ _, err = b.ReadByte() -+ return err -+ } -+ -+ if firstChar != '[' { -+ return ErrInvalidStructuredData -+ } -+ -+ quoting := false -+ bracketing := true -+ -+ for { -+ c, err := b.ReadByte() -+ if err != nil { -+ return err -+ } -+ -+ if !bracketing { -+ if c == ' ' { -+ // we done! -+ // consumed the last ']' and hit a space -+ break -+ } -+ -+ if c != '[' { -+ return ErrInvalidStructuredData -+ } -+ -+ bracketing = true -+ continue -+ } -+ -+ // makes sure we dont catch '\]' as per RFC -+ // PARAM-VALUE = UTF-8-STRING ; characters '""', '\' and ']' MUST be escaped. -+ if quoting { -+ quoting = false -+ continue -+ } -+ -+ switch c { -+ case '\\': -+ quoting = true -+ case ']': -+ bracketing = false -+ } -+ } -+ -+ return nil -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index ce0694e75462d..c08d52b94bc85 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -620,6 +620,9 @@ github.com/hashicorp/memberlist - # github.com/hashicorp/serf v0.9.6 - ## explicit; go 1.12 - github.com/hashicorp/serf/coordinate -+# github.com/heroku/x v0.0.50 -+## explicit; go 1.12 -+github.com/heroku/x/logplex/encoding - # github.com/hpcloud/tail v1.0.0 => github.com/grafana/tail v0.0.0-20220426200921-98e8eb28ea4c - ## explicit; go 1.13 - github.com/hpcloud/tail",Targets,Add Heroku HTTPS drain target (#6448) -88ef940ede3ecf965481a104b4fdb35cad7bb79f,2021-11-16 06:22:17,Ed Welch,"Loki: Set querier worker max concurrent regardless of run configuration. 
(#4761) - -* always set the querier worker MaxConcurrentRequests - -* removing a test which doesn't apply as this setting isn't being set in worker_service.go anymore",False,"diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index 18d30c5801573..c11b301c98fdc 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -212,6 +212,8 @@ func (t *Loki) initQuerier() (services.Service, error) { - if t.Cfg.Ingester.QueryStoreMaxLookBackPeriod != 0 { - t.Cfg.Querier.IngesterQueryStoreMaxLookback = t.Cfg.Ingester.QueryStoreMaxLookBackPeriod - } -+ // Querier worker's max concurrent requests must be the same as the querier setting -+ t.Cfg.Worker.MaxConcurrentRequests = t.Cfg.Querier.MaxConcurrent - - var err error - t.Querier, err = querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.overrides) -diff --git a/pkg/querier/worker_service.go b/pkg/querier/worker_service.go -index 6a3fa3afd89b1..9eeb6a8bcfb25 100644 ---- a/pkg/querier/worker_service.go -+++ b/pkg/querier/worker_service.go -@@ -131,9 +131,6 @@ func InitWorkerService( - - internalHandler = internalMiddleware.Wrap(internalHandler) - -- //Querier worker's max concurrent requests must be the same as the querier setting -- (*cfg.QuerierWorkerConfig).MaxConcurrentRequests = cfg.QuerierMaxConcurrent -- - //Return a querier worker pointed to the internal querier HTTP handler so there is not a conflict in routes between the querier - //and the query frontend - return querier_worker.NewQuerierWorker( -diff --git a/pkg/querier/worker_service_test.go b/pkg/querier/worker_service_test.go -index 3eadfc82e1e9a..5a282a5d8f024 100644 ---- a/pkg/querier/worker_service_test.go -+++ b/pkg/querier/worker_service_test.go -@@ -224,18 +224,6 @@ func Test_InitQuerierService(t *testing.T) { - } - }) - -- t.Run(""set the worker's max concurrent request to the same as the max concurrent setting for the querier"", func(t *testing.T) { -- for _, config := range nonStandaloneTargetPermutations { -- workerConfig := querier_worker.Config{} -- config.QuerierWorkerConfig = &workerConfig -- config.QuerierMaxConcurrent = 42 -- -- testContext(config, nil) -- -- assert.Equal(t, 42, workerConfig.MaxConcurrentRequests) -- } -- }) -- - t.Run(""always return a query worker service"", func(t *testing.T) { - for _, config := range nonStandaloneTargetPermutations { - workerConfig := querier_worker.Config{}",Loki,"Set querier worker max concurrent regardless of run configuration. 
(#4761) - -* always set the querier worker MaxConcurrentRequests - -* removing a test which doesn't apply as this setting isn't being set in worker_service.go anymore" -d8b1818b21972ed5c727d74cd5941f12e422a841,2025-01-24 20:48:08,renovate[bot],"chore(deps): update helm release minio to v5.4.0 (main) (#15946) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/production/helm/loki/Chart.lock b/production/helm/loki/Chart.lock -index beb95db3974ce..60417bd7d191b 100644 ---- a/production/helm/loki/Chart.lock -+++ b/production/helm/loki/Chart.lock -@@ -1,12 +1,12 @@ - dependencies: - - name: minio - repository: https://charts.min.io/ -- version: 5.3.0 -+ version: 5.4.0 - - name: grafana-agent-operator - repository: https://grafana.github.io/helm-charts - version: 0.5.0 - - name: rollout-operator - repository: https://grafana.github.io/helm-charts - version: 0.22.0 --digest: sha256:b52eac8eb486759c2b4b55ac111a00680bb3d872c9ad89434bc58b087fcd1d80 --generated: ""2024-12-14T21:29:47.873748325Z"" -+digest: sha256:48d111dd19a29685e51e7817f5e0c16366380d384dc5e14ca299755896489eff -+generated: ""2025-01-24T15:11:31.126201473Z"" -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index 234639a93a95c..1e06ae151c90e 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -13,7 +13,7 @@ icon: https://grafana.com/docs/loki/latest/logo_and_name.png - dependencies: - - name: minio - alias: minio -- version: 5.3.0 -+ version: 5.4.0 - repository: https://charts.min.io/ - condition: minio.enabled - - name: grafana-agent-operator",chore,"update helm release minio to v5.4.0 (main) (#15946) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -a419f9c1c9985f0faf7ed53346ea6daa6a85f6cd,2021-03-11 22:57:10,Chance Zibolski,docs/upgrading: Fix typo (#3478),False,"diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md -index e981ecb189a5d..4fd8f55916111 100644 ---- a/docs/sources/upgrading/_index.md -+++ b/docs/sources/upgrading/_index.md -@@ -47,7 +47,7 @@ In 2.2 we changed this behavior to apply the `max_query_parallelism` after split - - You could consider multiplying your current `max_query_parallelism` setting by 16 to obtain the previous behavior, though in practice we suspect few people would really want it this high unless you have a significant querier worker pool. 
- --**Also be aware to make sure `max_outsdanting_per_tenant` is always greater than `max_query_parallelism` or large queries will automatically fail with a 429 back to the user.** -+**Also be aware to make sure `max_outstanding_per_tenant` is always greater than `max_query_parallelism` or large queries will automatically fail with a 429 back to the user.**",unknown,docs/upgrading: Fix typo (#3478) -6284ed5e82b472d816f7de1f8ecbe937f58c0d5e,2024-08-21 05:04:56,Juraj Michálek,chore: make mixin range interval configurable (#13925),False,"diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json -index 016917dd76da7..d14ef45e63830 100644 ---- a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json -+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json -@@ -265,26 +265,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-read\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-read\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-read\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-read\"", 
route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""}) by (route) "", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""{{ route }} Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json -index b78ffce9f2c72..12aff66ec9378 100644 ---- a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json -+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json -@@ -265,26 +265,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-write\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-write\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-write\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""}) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/(loki.*|enterprise-logs)-write\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""})"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -diff --git a/production/loki-mixin-compiled/dashboards/loki-reads.json b/production/loki-mixin-compiled/dashboards/loki-reads.json -index ed65027b88d85..e9db1fc697c80 100644 ---- a/production/loki-mixin-compiled/dashboards/loki-reads.json -+++ b/production/loki-mixin-compiled/dashboards/loki-reads.json -@@ -265,26 +265,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/query-frontend\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- 
""legendFormat"": ""{{ route }} 99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/query-frontend\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/query-frontend\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/query-frontend\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""}) by (route) "", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""{{ route }} Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -@@ -610,26 +604,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/querier\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/querier\"", 
route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/querier\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/querier\"", route=~\""(api_prom_rules|api_prom_rules_namespace_groupname|api_v1_rules|loki_api_v1_delete|loki_api_v1_detected_labels|loki_api_v1_index_stats|loki_api_v1_index_volume|loki_api_v1_index_volume_range|loki_api_v1_label_name_values|loki_api_v1_label_values|loki_api_v1_labels|loki_api_v1_patterns|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_series|otlp_v1_logs|prometheus_api_v1_rules)\""}) by (route) "", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""{{ route }} Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -@@ -955,26 +943,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", 
route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) "", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""{{ route }} Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -@@ -1300,26 +1282,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", 
route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", 
route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) "", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""{{ route }} Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -@@ -1645,26 +1621,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/index-gateway\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/index-gateway\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/index-gateway\"", 
route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/index-gateway\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) "", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""{{ route }} Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -@@ -1990,26 +1960,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/bloom-gateway\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/bloom-gateway\"", 
route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""{{ route }} 50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""{{ route }} 50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/bloom-gateway\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/bloom-gateway\"", route=~\""(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\""}) by (route) "", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""{{ route }} Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -diff --git a/production/loki-mixin-compiled/dashboards/loki-writes.json b/production/loki-mixin-compiled/dashboards/loki-writes.json -index a4ff98dfbf0bd..5a174af381e09 100644 ---- a/production/loki-mixin-compiled/dashboards/loki-writes.json -+++ b/production/loki-mixin-compiled/dashboards/loki-writes.json -@@ -265,26 +265,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/distributor\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""99th Percentile"", -- ""refId"": 
""A"", -- ""step"": 10 -+ ""legendFormat"": ""99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/distributor\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/distributor\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""}) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/distributor\"", route=~\""api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\""})"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -@@ -735,26 +729,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", route=\""/logproto.Pusher/Push\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", route=\""/logproto.Pusher/Push\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", route=\""/logproto.Pusher/Push\""}) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester-zone.*\"", route=\""/logproto.Pusher/Push\""})"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -@@ -1080,26 +1068,20 @@ - { - ""expr"": ""histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", route=\""/logproto.Pusher/Push\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""99th Percentile"", -- ""refId"": ""A"", -- ""step"": 10 -+ ""legendFormat"": ""99th percentile"", -+ ""refId"": ""A"" - }, - { - ""expr"": ""histogram_quantile(0.50, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", route=\""/logproto.Pusher/Push\""})) * 1e3"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, -- ""legendFormat"": ""50th Percentile"", -- ""refId"": ""B"", -- ""step"": 10 -+ ""legendFormat"": ""50th percentile"", -+ ""refId"": ""B"" - }, - { - ""expr"": ""1e3 * 
sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", route=\""/logproto.Pusher/Push\""}) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\""$cluster\"", job=~\""($namespace)/ingester\"", route=\""/logproto.Pusher/Push\""})"", - ""format"": ""time_series"", -- ""intervalFactor"": 2, - ""legendFormat"": ""Average"", -- ""refId"": ""C"", -- ""step"": 10 -+ ""refId"": ""C"" - } - ], - ""title"": ""Latency"", -diff --git a/production/loki-mixin/config.libsonnet b/production/loki-mixin/config.libsonnet -index bdf5c9de5b58a..98d002ee84969 100644 ---- a/production/loki-mixin/config.libsonnet -+++ b/production/loki-mixin/config.libsonnet -@@ -23,6 +23,11 @@ - // Enable TSDB specific dashboards - tsdb: true, - -+ // Tunes histogram recording rules to aggregate over this interval. -+ // Set to at least twice the scrape interval; otherwise, recording rules will output no data. -+ // Set to four times the scrape interval to account for edge cases: https://www.robustperception.io/what-range-should-i-use-with-rate/ -+ recording_rules_range_interval: '1m', -+ - // SSD related configuration for dashboards. - ssd: { - // Support Loki SSD mode on dashboards. -diff --git a/production/loki-mixin/jsonnetfile.lock.json b/production/loki-mixin/jsonnetfile.lock.json -index f895125a2aa3c..7fd61db65a6e1 100644 ---- a/production/loki-mixin/jsonnetfile.lock.json -+++ b/production/loki-mixin/jsonnetfile.lock.json -@@ -8,8 +8,8 @@ - ""subdir"": ""grafonnet"" - } - }, -- ""version"": ""6db00c292d3a1c71661fc875f90e0ec7caa538c2"", -- ""sum"": ""gF8foHByYcB25jcUOBqP6jxk0OPifQMjPvKY0HaCk6w="" -+ ""version"": ""a1d61cce1da59c71409b99b5c7568511fec661ea"", -+ ""sum"": ""342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc="" - }, - { - ""source"": { -@@ -18,8 +18,8 @@ - ""subdir"": ""grafana-builder"" - } - }, -- ""version"": ""f95501009c9b29bed87fe9d57c1a6e72e210f137"", -- ""sum"": ""+z5VY+bPBNqXcmNAV8xbJcbsRA+pro1R3IM7aIY8OlU="" -+ ""version"": ""1d31bb1b58a2a2a3ffb2296cd5aa4d5e4ae5576b"", -+ ""sum"": ""yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="" - }, - { - ""source"": { -@@ -28,8 +28,8 @@ - ""subdir"": ""mixin-utils"" - } - }, -- ""version"": ""3f71e00a64810075b5d5f969cc6d0e419cbdebc4"", -- ""sum"": ""v6fuqqQp9rHZbsxN9o79QzOpUlwYZEJ84DxTCZMCYeU="" -+ ""version"": ""1d31bb1b58a2a2a3ffb2296cd5aa4d5e4ae5576b"", -+ ""sum"": ""LoYq5QxJmUXEtqkEG8CFUBLBhhzDDaNANHc7Gz36ZdM="" - }, - { - ""source"": { -@@ -38,8 +38,8 @@ - ""subdir"": ""operations/mimir-mixin"" - } - }, -- ""version"": ""91986521f324c84a9cf869529bd901f077ddf8bc"", -- ""sum"": ""eBp1Oo3j0YiI5hv9YrZb0lJQxEOC17rP3pZiKM/R3Zo="" -+ ""version"": ""61dd68b7397e197ff6bb81251737ead633fd42b9"", -+ ""sum"": ""en5dwfa7Hh4t7pWAoMc1uDXHGStqxqL1Vj+E/5NGwRQ="" - }, - { - ""source"": { -@@ -48,8 +48,8 @@ - ""subdir"": ""jsonnet/kube-prometheus/lib"" - } - }, -- ""version"": ""b1c474d8a1d7cd73df9bf4efe1680f1e6d9f5c17"", -- ""sum"": ""2KXEfdW5YJem19w1VjQNvaTIOhdl8KFi4dvmUdWJtro="" -+ ""version"": ""74f4e0cda3f3c2a4e8a1ab7d9bdbee019a47c851"", -+ ""sum"": ""QKRgrgEZ3k9nLmLCrDBaeIGVqQZf+AvZTcnhdLk3TrA="" - } - ], - ""legacyImports"": false -diff --git a/production/loki-mixin/recording_rules.libsonnet b/production/loki-mixin/recording_rules.libsonnet -index 2feda5cac6e8a..e34c861b17041 100644 ---- a/production/loki-mixin/recording_rules.libsonnet -+++ b/production/loki-mixin/recording_rules.libsonnet -@@ -5,9 +5,9 @@ local utils = import 'mixin-utils/utils.libsonnet'; - groups+: [{ - name: 
'loki_rules', - rules: -- utils.histogramRules('loki_request_duration_seconds', [$._config.per_cluster_label, 'job']) + -- utils.histogramRules('loki_request_duration_seconds', [$._config.per_cluster_label, 'job', 'route']) + -- utils.histogramRules('loki_request_duration_seconds', [$._config.per_cluster_label, 'namespace', 'job', 'route']), -+ utils.histogramRules('loki_request_duration_seconds', [$._config.per_cluster_label, 'job'], $._config.recording_rules_range_interval) + -+ utils.histogramRules('loki_request_duration_seconds', [$._config.per_cluster_label, 'job', 'route'], $._config.recording_rules_range_interval) + -+ utils.histogramRules('loki_request_duration_seconds', [$._config.per_cluster_label, 'namespace', 'job', 'route'], $._config.recording_rules_range_interval), - }], - }, - }",chore,make mixin range interval configurable (#13925) -6bbb61eb2f52288038ae0112a3b577a1b7861d4d,2020-02-11 22:06:36,Owen Diehl,"Binary operators in LogQL (#1662) - -* binops in ast - -* bin op associativity & precedence - -* binOpEvaluator work - -* defers close only if constructed without error - -* tests binary ops - -* more binops - -* updates docs - -* changelog - -* better logql parsing errors for binops - -Signed-off-by: Owen Diehl - -* adds ^ operator",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 7c910b53664e8..52833472abde0 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -2,6 +2,7 @@ - - ### Features - -+* [1662](https://github.com/grafana/loki/pull/1662) **owen-d**: Introduces binary operators in LogQL - * [1572](https://github.com/grafana/loki/pull/1572) **owen-d**: Introduces the `querier.query-ingesters-within` flag and associated yaml config. When enabled, queries for a time range that do not overlap this lookback interval will not be sent to the ingesters. - * [1558](https://github.com/grafana/loki/pull/1558) **owen-d**: Introduces `ingester.max-chunk-age` which specifies the maximum chunk age before it's cut. - * [1565](https://github.com/grafana/loki/pull/1565) **owen-d**: The query frontend's `split_queries_by_interval` can now be specified as an override -diff --git a/docs/logql.md b/docs/logql.md -index 9fb70a0c1a266..284e825af054c 100644 ---- a/docs/logql.md -+++ b/docs/logql.md -@@ -151,3 +151,50 @@ by level: - Get the rate of HTTP GET requests from NGINX logs: - - > `avg(rate(({job=""nginx""} |= ""GET"")[10s])) by (region)` -+ -+### Binary Operators -+ -+#### Arithmetic Binary Operators -+ -+Arithmetic binary operators -+The following binary arithmetic operators exist in Loki: -+ -+- `+` (addition) -+- `-` (subtraction) -+- `*` (multiplication) -+- `/` (division) -+- `%` (modulo) -+- `^` (power/exponentiation) -+ -+Binary arithmetic operators are defined only between two vectors. -+ -+Between two instant vectors, a binary arithmetic operator is applied to each entry in the left-hand side vector and its matching element in the right-hand vector. The result is propagated into the result vector with the grouping labels becoming the output label set. Entries for which no matching entry in the right-hand vector can be found are not part of the result. -+ -+##### Examples -+ -+Get proportion of warning logs to error logs for the `foo` app -+ -+> `sum(rate({app=""foo"", level=""warn""}[1m])) / sum(rate({app=""foo"", level=""error""}[1m]))` -+ -+Operators on the same precedence level are left-associative (queries substituted with numbers here for simplicity). For example, 2 * 3 % 2 is equivalent to (2 * 3) % 2. 
However, some operators have different priorities: 1 + 2 / 3 will still be 1 + ( 2 / 3 ). These function identically to mathematical conventions. -+ -+ -+#### Logical/set binary operators -+ -+These logical/set binary operators are only defined between two vectors: -+ -+- `and` (intersection) -+- `or` (union) -+- `unless` (complement) -+ -+`vector1 and vector2` results in a vector consisting of the elements of vector1 for which there are elements in vector2 with exactly matching label sets. Other elements are dropped. -+ -+`vector1 or vector2` results in a vector that contains all original elements (label sets + values) of vector1 and additionally all elements of vector2 which do not have matching label sets in vector1. -+ -+`vector1 unless vector2` results in a vector consisting of the elements of vector1 for which there are no elements in vector2 with exactly matching label sets. All matching elements in both vectors are dropped. -+ -+##### Examples -+ -+This contrived query will return the intersection of these queries, effectively `rate({app=""bar""})` -+ -+> `rate({app=~""foo|bar""}[1m]) and rate({app=""bar""}[1m])` -diff --git a/pkg/logql/ast.go b/pkg/logql/ast.go -index 84cb803410eb6..e00659f9f5900 100644 ---- a/pkg/logql/ast.go -+++ b/pkg/logql/ast.go -@@ -227,6 +227,17 @@ const ( - OpTypeTopK = ""topk"" - OpTypeCountOverTime = ""count_over_time"" - OpTypeRate = ""rate"" -+ -+ // binops -+ OpTypeOr = ""or"" -+ OpTypeAnd = ""and"" -+ OpTypeUnless = ""unless"" -+ OpTypeAdd = ""+"" -+ OpTypeSub = ""-"" -+ OpTypeMul = ""*"" -+ OpTypeDiv = ""/"" -+ OpTypeMod = ""%"" -+ OpTypePow = ""^"" - ) - - // SampleExpr is a LogQL expression filtering logs and returning metric samples. -@@ -370,6 +381,41 @@ func (e *vectorAggregationExpr) String() string { - return formatOperation(e.operation, e.grouping, params...) 
- } - -+type binOpExpr struct { -+ SampleExpr -+ RHS SampleExpr -+ op string -+} -+ -+func (e *binOpExpr) String() string { -+ return fmt.Sprintf(""%s %s %s"", e.SampleExpr.String(), e.op, e.RHS.String()) -+} -+ -+func mustNewBinOpExpr(op string, lhs, rhs Expr) SampleExpr { -+ left, ok := lhs.(SampleExpr) -+ if !ok { -+ panic(newParseError(fmt.Sprintf( -+ ""unexpected type for left leg of binary operation (%s): %T"", -+ op, -+ lhs, -+ ), 0, 0)) -+ } -+ -+ right, ok := rhs.(SampleExpr) -+ if !ok { -+ panic(newParseError(fmt.Sprintf( -+ ""unexpected type for right leg of binary operation (%s): %T"", -+ op, -+ rhs, -+ ), 0, 0)) -+ } -+ return &binOpExpr{ -+ SampleExpr: left, -+ RHS: right, -+ op: op, -+ } -+} -+ - // helper used to impl Stringer for vector and range aggregations - // nolint:interfacer - func formatOperation(op string, grouping *grouping, params ...string) string { -diff --git a/pkg/logql/ast_test.go b/pkg/logql/ast_test.go -index 303cf52986ea8..063fecf1561be 100644 ---- a/pkg/logql/ast_test.go -+++ b/pkg/logql/ast_test.go -@@ -43,6 +43,14 @@ func Test_SampleExpr_String(t *testing.T) { - `sum(count_over_time({job=""mysql""}[5m]))`, - `topk(10,sum(rate({region=""us-east1""}[5m])) by (name))`, - `avg( rate( ( {job=""nginx""} |= ""GET"" ) [10s] ) ) by (region)`, -+ `sum by (cluster) (count_over_time({job=""mysql""}[5m]))`, -+ `sum by (cluster) (count_over_time({job=""mysql""}[5m])) / sum by (cluster) (count_over_time({job=""postgres""}[5m])) `, -+ ` -+ sum by (cluster) (count_over_time({job=""postgres""}[5m])) / -+ sum by (cluster) (count_over_time({job=""postgres""}[5m])) / -+ sum by (cluster) (count_over_time({job=""postgres""}[5m])) -+ `, -+ `sum by (cluster) (count_over_time({job=""mysql""}[5m])) / min(count_over_time({job=""mysql""}[5m])) `, - } { - t.Run(tc, func(t *testing.T) { - expr, err := ParseExpr(tc) -diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go -index 5e367852b871f..12d6572fa6e7d 100644 ---- a/pkg/logql/engine.go -+++ b/pkg/logql/engine.go -@@ -213,10 +213,11 @@ func (ng *engine) exec(ctx context.Context, q *query) (promql.Value, error) { - func (ng *engine) evalSample(ctx context.Context, expr SampleExpr, q *query) (promql.Value, error) { - - stepEvaluator, err := ng.evaluator.Evaluator(ctx, expr, q) -- defer helpers.LogError(""closing SampleExpr"", stepEvaluator.Close) - if err != nil { - return nil, err - } -+ defer helpers.LogError(""closing SampleExpr"", stepEvaluator.Close) -+ - seriesIndex := map[uint64]*promql.Series{} - - next, ts, vec := stepEvaluator.Next() -diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go -index d8717ad485e1b..347ce06687de3 100644 ---- a/pkg/logql/engine_test.go -+++ b/pkg/logql/engine_test.go -@@ -3,6 +3,7 @@ package logql - import ( - ""context"" - ""fmt"" -+ ""math"" - ""testing"" - ""time"" - -@@ -653,13 +654,17 @@ func TestEngine_NewRangeQuery(t *testing.T) { - }, - }, - { -- `bottomk(3,rate(({app=~""foo|bar""} |~"".+bar"")[1m])) without (app)`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ `bottomk(3,rate(({app=~""foo|bar|fuzz|buzz""} |~"".+bar"")[1m])) without (app)`, time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, - [][]*logproto.Stream{ -- {newStream(testSize, factor(10, identity), `{app=""foo""}`), newStream(testSize, factor(20, identity), `{app=""bar""}`), -- newStream(testSize, factor(5, identity), `{app=""fuzz""}`), newStream(testSize, identity, `{app=""buzz""}`)}, -+ { -+ newStream(testSize, factor(10, identity), `{app=""foo""}`), -+ 
newStream(testSize, factor(20, identity), `{app=""bar""}`), -+ newStream(testSize, factor(5, identity), `{app=""fuzz""}`), -+ newStream(testSize, identity, `{app=""buzz""}`), -+ }, - }, - []SelectParams{ -- {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}|~"".+bar""`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar|fuzz|buzz""}|~"".+bar""`}}, - }, - promql.Matrix{ - promql.Series{ -@@ -676,6 +681,289 @@ func TestEngine_NewRangeQuery(t *testing.T) { - }, - }, - }, -+ // binops -+ { -+ `rate({app=""foo""}[1m]) or rate({app=""bar""}[1m])`, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""foo""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 0.2}, {T: 90 * 1000, V: 0.2}, {T: 120 * 1000, V: 0.2}, {T: 150 * 1000, V: 0.2}, {T: 180 * 1000, V: 0.2}}, -+ }, -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""foo""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 0.2}, {T: 90 * 1000, V: 0.2}, {T: 120 * 1000, V: 0.2}, {T: 150 * 1000, V: 0.2}, {T: 180 * 1000, V: 0.2}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ rate({app=~""foo|bar""}[1m]) and -+ rate({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 0.2}, {T: 90 * 1000, V: 0.2}, {T: 120 * 1000, V: 0.2}, {T: 150 * 1000, V: 0.2}, {T: 180 * 1000, V: 0.2}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ rate({app=~""foo|bar""}[1m]) unless -+ rate({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: 
labels.Labels{{Name: ""app"", Value: ""foo""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 0.2}, {T: 90 * 1000, V: 0.2}, {T: 120 * 1000, V: 0.2}, {T: 150 * 1000, V: 0.2}, {T: 180 * 1000, V: 0.2}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ rate({app=~""foo|bar""}[1m]) + -+ rate({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 0.4}, {T: 90 * 1000, V: 0.4}, {T: 120 * 1000, V: 0.4}, {T: 150 * 1000, V: 0.4}, {T: 180 * 1000, V: 0.4}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ rate({app=~""foo|bar""}[1m]) - -+ rate({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 0}, {T: 90 * 1000, V: 0}, {T: 120 * 1000, V: 0}, {T: 150 * 1000, V: 0}, {T: 180 * 1000, V: 0}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ count_over_time({app=~""foo|bar""}[1m]) * -+ count_over_time({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 144}, {T: 90 * 1000, V: 144}, {T: 120 * 1000, V: 144}, {T: 150 * 1000, V: 144}, {T: 180 * 1000, V: 144}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ count_over_time({app=~""foo|bar""}[1m]) * -+ count_over_time({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), 
`{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 144}, {T: 90 * 1000, V: 144}, {T: 120 * 1000, V: 144}, {T: 150 * 1000, V: 144}, {T: 180 * 1000, V: 144}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ count_over_time({app=~""foo|bar""}[1m]) / -+ count_over_time({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 1}, {T: 90 * 1000, V: 1}, {T: 120 * 1000, V: 1}, {T: 150 * 1000, V: 1}, {T: 180 * 1000, V: 1}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ count_over_time({app=~""foo|bar""}[1m]) % -+ count_over_time({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}`}}, -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 0}, {T: 90 * 1000, V: 0}, {T: 120 * 1000, V: 0}, {T: 150 * 1000, V: 0}, {T: 180 * 1000, V: 0}}, -+ }, -+ }, -+ }, -+ // tests precedence: should be x + (x/x) -+ { -+ ` -+ sum by (app) (rate({app=~""foo|bar""} |~"".+bar"" [1m])) + -+ sum by (app) (rate({app=~""foo|bar""} |~"".+bar"" [1m])) / -+ sum by (app) (rate({app=~""foo|bar""} |~"".+bar"" [1m])) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""foo""}`), -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=~""foo|bar""}|~"".+bar""`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 1.2}, {T: 90 * 1000, V: 1.2}, {T: 120 * 1000, V: 1.2}, {T: 150 * 1000, V: 1.2}, {T: 180 * 1000, V: 1.2}}, -+ }, -+ promql.Series{ -+ Metric: 
labels.Labels{{Name: ""app"", Value: ""foo""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: 1.2}, {T: 90 * 1000, V: 1.2}, {T: 120 * 1000, V: 1.2}, {T: 150 * 1000, V: 1.2}, {T: 180 * 1000, V: 1.2}}, -+ }, -+ }, -+ }, -+ { -+ ` -+ count_over_time({app=""bar""}[1m]) ^ count_over_time({app=""bar""}[1m]) -+ `, -+ time.Unix(60, 0), time.Unix(180, 0), 30 * time.Second, logproto.FORWARD, 100, -+ [][]*logproto.Stream{ -+ { -+ newStream(testSize, factor(5, identity), `{app=""bar""}`), -+ }, -+ }, -+ []SelectParams{ -+ {&logproto.QueryRequest{Direction: logproto.FORWARD, Start: time.Unix(0, 0), End: time.Unix(180, 0), Limit: 0, Selector: `{app=""bar""}`}}, -+ }, -+ promql.Matrix{ -+ promql.Series{ -+ Metric: labels.Labels{{Name: ""app"", Value: ""bar""}}, -+ Points: []promql.Point{{T: 60 * 1000, V: math.Pow(12, 12)}, {T: 90 * 1000, V: math.Pow(12, 12)}, {T: 120 * 1000, V: math.Pow(12, 12)}, {T: 150 * 1000, V: math.Pow(12, 12)}, {T: 180 * 1000, V: math.Pow(12, 12)}}, -+ }, -+ }, -+ }, - } { - test := test - t.Run(fmt.Sprintf(""%s %s"", test.qs, test.direction), func(t *testing.T) { -diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go -index 2fdf8f0d99ab3..b7539121228c2 100644 ---- a/pkg/logql/evaluator.go -+++ b/pkg/logql/evaluator.go -@@ -104,6 +104,8 @@ func (ev *defaultEvaluator) Evaluator(ctx context.Context, expr SampleExpr, q Pa - return ev.vectorAggEvaluator(ctx, e, q) - case *rangeAggregationExpr: - return ev.rangeAggEvaluator(ctx, e, q) -+ case *binOpExpr: -+ return ev.binOpEvaluator(ctx, e, q) - - default: - return nil, errors.Errorf(""unexpected type (%T): %v"", e, e) -@@ -335,3 +337,194 @@ func (ev *defaultEvaluator) rangeAggEvaluator(ctx context.Context, expr *rangeAg - - }, vecIter.Close) - } -+ -+func (ev *defaultEvaluator) binOpEvaluator( -+ ctx context.Context, -+ expr *binOpExpr, -+ q Params, -+) (StepEvaluator, error) { -+ lhs, err := ev.Evaluator(ctx, expr.SampleExpr, q) -+ if err != nil { -+ return nil, err -+ } -+ rhs, err := ev.Evaluator(ctx, expr.RHS, q) -+ if err != nil { -+ return nil, err -+ } -+ -+ return newStepEvaluator(func() (bool, int64, promql.Vector) { -+ pairs := map[uint64][2]*promql.Sample{} -+ var ts int64 -+ -+ // populate pairs -+ for i, eval := range []StepEvaluator{lhs, rhs} { -+ next, timestamp, vec := eval.Next() -+ ts = timestamp -+ -+ // These should _always_ happen at the same step on each evaluator. -+ if !next { -+ return next, ts, nil -+ } -+ -+ for _, sample := range vec { -+ // TODO(owen-d): this seems wildly inefficient: we're calculating -+ // the hash on each sample & step per evaluator. -+ // We seem limited to this approach due to using the StepEvaluator ifc. 
-+ hash := sample.Metric.Hash() -+ pair := pairs[hash] -+ pair[i] = &promql.Sample{ -+ Metric: sample.Metric, -+ Point: sample.Point, -+ } -+ pairs[hash] = pair -+ } -+ } -+ -+ results := make(promql.Vector, 0, len(pairs)) -+ for _, pair := range pairs { -+ -+ // merge -+ if merged := ev.mergeBinOp(expr.op, pair[0], pair[1]); merged != nil { -+ results = append(results, *merged) -+ } -+ } -+ -+ return true, ts, results -+ }, func() (lastError error) { -+ for _, ev := range []StepEvaluator{lhs, rhs} { -+ if err := ev.Close(); err != nil { -+ lastError = err -+ } -+ } -+ return lastError -+ }) -+} -+ -+func (ev *defaultEvaluator) mergeBinOp(op string, left, right *promql.Sample) *promql.Sample { -+ var merger func(left, right *promql.Sample) *promql.Sample -+ -+ switch op { -+ case OpTypeOr: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ // return the left entry found (prefers left hand side) -+ if left != nil { -+ return left -+ } -+ return right -+ } -+ -+ case OpTypeAnd: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ // return left sample if there's a second sample for that label set -+ if left != nil && right != nil { -+ return left -+ } -+ return nil -+ } -+ -+ case OpTypeUnless: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ // return left sample if there's not a second sample for that label set -+ if right == nil { -+ return left -+ } -+ return nil -+ } -+ -+ case OpTypeAdd: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ if left == nil || right == nil { -+ return nil -+ } -+ res := promql.Sample{ -+ Metric: left.Metric, -+ Point: left.Point, -+ } -+ res.Point.V += right.Point.V -+ return &res -+ } -+ -+ case OpTypeSub: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ if left == nil || right == nil { -+ return nil -+ } -+ res := promql.Sample{ -+ Metric: left.Metric, -+ Point: left.Point, -+ } -+ res.Point.V -= right.Point.V -+ return &res -+ } -+ -+ case OpTypeMul: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ if left == nil || right == nil { -+ return nil -+ } -+ res := promql.Sample{ -+ Metric: left.Metric, -+ Point: left.Point, -+ } -+ res.Point.V *= right.Point.V -+ return &res -+ } -+ -+ case OpTypeDiv: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ if left == nil || right == nil { -+ return nil -+ } -+ res := promql.Sample{ -+ Metric: left.Metric.Copy(), -+ Point: left.Point, -+ } -+ -+ // guard against divide by zero -+ if right.Point.V == 0 { -+ res.Point.V = math.NaN() -+ } else { -+ res.Point.V /= right.Point.V -+ } -+ return &res -+ } -+ -+ case OpTypeMod: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ if left == nil || right == nil { -+ return nil -+ } -+ res := promql.Sample{ -+ Metric: left.Metric, -+ Point: left.Point, -+ } -+ // guard against divide by zero -+ if right.Point.V == 0 { -+ res.Point.V = math.NaN() -+ } else { -+ res.Point.V = math.Mod(res.Point.V, right.Point.V) -+ } -+ return &res -+ } -+ -+ case OpTypePow: -+ merger = func(left, right *promql.Sample) *promql.Sample { -+ if left == nil || right == nil { -+ return nil -+ } -+ -+ res := promql.Sample{ -+ Metric: left.Metric, -+ Point: left.Point, -+ } -+ res.Point.V = math.Pow(left.Point.V, right.Point.V) -+ return &res -+ } -+ -+ default: -+ panic(errors.Errorf(""should never happen: unexpected operation: (%s)"", op)) -+ } -+ -+ return merger(left, right) -+ -+} -diff --git a/pkg/logql/evaluator_test.go b/pkg/logql/evaluator_test.go -new file mode 100644 -index 
0000000000000..23ef3d2f71c94 ---- /dev/null -+++ b/pkg/logql/evaluator_test.go -@@ -0,0 +1,31 @@ -+package logql -+ -+import ( -+ ""math"" -+ ""testing"" -+ -+ ""github.com/prometheus/prometheus/promql"" -+ ""github.com/stretchr/testify/require"" -+) -+ -+func TestDefaultEvaluator_DivideByZero(t *testing.T) { -+ ev := &defaultEvaluator{} -+ -+ require.Equal(t, true, math.IsNaN(ev.mergeBinOp(OpTypeDiv, -+ &promql.Sample{ -+ Point: promql.Point{T: 1, V: 1}, -+ }, -+ &promql.Sample{ -+ Point: promql.Point{T: 1, V: 0}, -+ }, -+ ).Point.V)) -+ -+ require.Equal(t, true, math.IsNaN(ev.mergeBinOp(OpTypeMod, -+ &promql.Sample{ -+ Point: promql.Point{T: 1, V: 1}, -+ }, -+ &promql.Sample{ -+ Point: promql.Point{T: 1, V: 0}, -+ }, -+ ).Point.V)) -+} -diff --git a/pkg/logql/expr.y b/pkg/logql/expr.y -index a053b3224d36e..9a811985191f6 100644 ---- a/pkg/logql/expr.y -+++ b/pkg/logql/expr.y -@@ -21,6 +21,8 @@ import ( - Selector []*labels.Matcher - VectorAggregationExpr SampleExpr - VectorOp string -+ BinOpExpr SampleExpr -+ binOp string - str string - duration time.Duration - int int64 -@@ -41,26 +43,36 @@ import ( - %type selector - %type vectorAggregationExpr - %type vectorOp -+%type binOpExpr - - %token IDENTIFIER STRING - %token DURATION - %token MATCHERS LABELS EQ NEQ RE NRE OPEN_BRACE CLOSE_BRACE OPEN_BRACKET CLOSE_BRACKET COMMA DOT PIPE_MATCH PIPE_EXACT - OPEN_PARENTHESIS CLOSE_PARENTHESIS BY WITHOUT COUNT_OVER_TIME RATE SUM AVG MAX MIN COUNT STDDEV STDVAR BOTTOMK TOPK - -+// Operators are listed with increasing precedence. -+%left OR -+%left AND UNLESS -+%left ADD SUB -+%left MUL DIV MOD -+%right POW -+ - %% - - root: expr { exprlex.(*lexer).expr = $1 }; - - expr: -- logExpr { $$ = $1 } -- | rangeAggregationExpr { $$ = $1 } -- | vectorAggregationExpr { $$ = $1 } -+ logExpr { $$ = $1 } -+ | rangeAggregationExpr { $$ = $1 } -+ | vectorAggregationExpr { $$ = $1 } -+ | binOpExpr { $$ = $1 } -+ | OPEN_PARENTHESIS expr CLOSE_PARENTHESIS { $$ = $2 } - ; - - logExpr: - selector { $$ = newMatcherExpr($1)} - | logExpr filter STRING { $$ = NewFilterExpr( $1, $2, $3 ) } -- | OPEN_PARENTHESIS logExpr CLOSE_PARENTHESIS { $$ = $2} -+ | OPEN_PARENTHESIS logExpr CLOSE_PARENTHESIS { $$ = $2 } - | logExpr filter error - | logExpr error - ; -@@ -115,6 +127,22 @@ matcher: - | IDENTIFIER NRE STRING { $$ = mustNewMatcher(labels.MatchNotRegexp, $1, $3) } - ; - -+// TODO(owen-d): add (on,ignoring) clauses to binOpExpr -+// Comparison operators are currently avoided due to symbol collisions in our grammar: ""!="" means not equal in prometheus, -+// but is part of our filter grammar. -+// reference: https://prometheus.io/docs/prometheus/latest/querying/operators/ -+// Operator precedence only works if each of these is listed separately. 
-+binOpExpr: -+ expr OR expr { $$ = mustNewBinOpExpr(""or"", $1, $3) } -+ | expr AND expr { $$ = mustNewBinOpExpr(""and"", $1, $3) } -+ | expr UNLESS expr { $$ = mustNewBinOpExpr(""unless"", $1, $3) } -+ | expr ADD expr { $$ = mustNewBinOpExpr(""+"", $1, $3) } -+ | expr SUB expr { $$ = mustNewBinOpExpr(""-"", $1, $3) } -+ | expr MUL expr { $$ = mustNewBinOpExpr(""*"", $1, $3) } -+ | expr DIV expr { $$ = mustNewBinOpExpr(""/"", $1, $3) } -+ | expr MOD expr { $$ = mustNewBinOpExpr(""%"", $1, $3) } -+ | expr POW expr { $$ = mustNewBinOpExpr(""^"", $1, $3) } -+ - vectorOp: - SUM { $$ = OpTypeSum } - | AVG { $$ = OpTypeAvg } -diff --git a/pkg/logql/expr.y.go b/pkg/logql/expr.y.go -index 4730258790db9..6e2a851a6a0a8 100644 ---- a/pkg/logql/expr.y.go -+++ b/pkg/logql/expr.y.go -@@ -25,6 +25,8 @@ type exprSymType struct { - Selector []*labels.Matcher - VectorAggregationExpr SampleExpr - VectorOp string -+ BinOpExpr SampleExpr -+ binOp string - str string - duration time.Duration - int int64 -@@ -62,6 +64,15 @@ const STDDEV = 57374 - const STDVAR = 57375 - const BOTTOMK = 57376 - const TOPK = 57377 -+const OR = 57378 -+const AND = 57379 -+const UNLESS = 57380 -+const ADD = 57381 -+const SUB = 57382 -+const MUL = 57383 -+const DIV = 57384 -+const MOD = 57385 -+const POW = 57386 - - var exprToknames = [...]string{ - ""$end"", -@@ -99,6 +110,15 @@ var exprToknames = [...]string{ - ""STDVAR"", - ""BOTTOMK"", - ""TOPK"", -+ ""OR"", -+ ""AND"", -+ ""UNLESS"", -+ ""ADD"", -+ ""SUB"", -+ ""MUL"", -+ ""DIV"", -+ ""MOD"", -+ ""POW"", - } - var exprStatenames = [...]string{} - -@@ -113,92 +133,127 @@ var exprExca = [...]int{ - -2, 0, - -1, 3, - 1, 2, -+ 22, 2, -+ 36, 2, -+ 37, 2, -+ 38, 2, -+ 39, 2, -+ 40, 2, -+ 41, 2, -+ 42, 2, -+ 43, 2, -+ 44, 2, -+ -2, 0, -+ -1, 39, -+ 36, 2, -+ 37, 2, -+ 38, 2, -+ 39, 2, -+ 40, 2, -+ 41, 2, -+ 42, 2, -+ 43, 2, -+ 44, 2, - -2, 0, - } - - const exprPrivate = 57344 - --const exprLast = 149 -+const exprLast = 202 - - var exprAct = [...]int{ - -- 31, 5, 4, 22, 36, 69, 10, 41, 30, 49, -- 32, 33, 86, 46, 7, 32, 33, 88, 11, 12, -- 13, 14, 16, 17, 15, 18, 19, 20, 21, 90, -- 89, 48, 45, 44, 11, 12, 13, 14, 16, 17, -- 15, 18, 19, 20, 21, 58, 85, 86, 3, 68, -- 67, 63, 87, 84, 23, 71, 28, 72, 61, 65, -- 64, 47, 27, 29, 26, 80, 81, 58, 82, 83, -- 66, 24, 25, 53, 40, 76, 75, 74, 42, 11, -- 12, 13, 14, 16, 17, 15, 18, 19, 20, 21, -- 92, 93, 62, 59, 10, 78, 10, 59, 77, 73, -- 91, 27, 43, 26, 7, 27, 37, 26, 23, 51, -- 24, 25, 70, 79, 24, 25, 27, 60, 26, 23, -- 9, 50, 23, 61, 52, 24, 25, 27, 40, 26, -- 27, 39, 26, 35, 38, 37, 24, 25, 6, 24, -- 25, 54, 55, 56, 57, 8, 34, 2, 1, -+ 42, 5, 4, 32, 47, 90, 60, 3, 62, 26, -+ 27, 28, 29, 30, 31, 39, 28, 29, 30, 31, -+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 23, -+ 24, 25, 26, 27, 28, 29, 30, 31, 31, 41, -+ 70, 43, 44, 66, 65, 43, 44, 111, 63, 24, -+ 25, 26, 27, 28, 29, 30, 31, 33, 107, 107, -+ 110, 82, 106, 109, 108, 37, 79, 36, 105, 86, -+ 89, 88, 83, 84, 34, 35, 92, 61, 93, 85, -+ 69, 68, 40, 11, 87, 11, 101, 102, 79, 103, -+ 104, 7, 67, 64, 72, 12, 13, 14, 15, 17, -+ 18, 16, 19, 20, 21, 22, 71, 74, 97, 73, -+ 96, 113, 114, 12, 13, 14, 15, 17, 18, 16, -+ 19, 20, 21, 22, 12, 13, 14, 15, 17, 18, -+ 16, 19, 20, 21, 22, 2, 80, 95, 99, 59, -+ 33, 98, 58, 38, 37, 94, 36, 46, 37, 48, -+ 36, 112, 48, 34, 35, 91, 100, 34, 35, 49, -+ 50, 51, 52, 53, 54, 55, 56, 57, 80, 6, -+ 10, 8, 33, 9, 45, 1, 37, 0, 36, 0, -+ 37, 0, 36, 33, 0, 34, 35, 82, 81, 34, -+ 35, 37, 61, 36, 75, 76, 77, 78, 0, 0, -+ 34, 35, - } - var exprPact = [...]int{ - -- -7, -1000, -1000, 120, 
-1000, -1000, -1000, 83, 42, -13, -- 131, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -- -1000, -1000, 129, -1000, -1000, -1000, -1000, -1000, 106, 81, -- 9, 40, 10, -12, 107, 59, -1000, 132, -1000, -1000, -- -1000, 95, 117, 81, 38, 37, 53, 54, 108, 108, -- -1000, -1000, 102, -1000, 94, 72, 71, 70, 93, -1000, -- -1000, -1000, 52, 91, -8, -8, 54, 31, 24, 30, -- -1000, -5, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -- -1000, -1000, 8, 7, -1000, -1000, 96, -1000, -1000, -8, -- -8, -1000, -1000, -1000, -+ 70, -1000, -7, 138, -1000, -1000, -1000, 70, -1000, 61, -+ 18, 145, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -+ -1000, -1000, -1000, 70, 70, 70, 70, 70, 70, 70, -+ 70, 70, 137, -1000, -1000, -1000, -1000, -1000, -16, 170, -+ 72, 88, 60, 59, 19, 92, 93, -1000, 185, 12, -+ -30, -30, -25, -25, -6, -6, -6, -6, -1000, -1000, -+ -1000, -1000, 166, 181, 72, 57, 47, 67, 99, 151, -+ 151, -1000, -1000, 148, -1000, 140, 132, 105, 103, 136, -+ -1000, -1000, -1000, 55, 134, 22, 22, 99, 46, 40, -+ 42, -1000, 41, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -+ -1000, -1000, -1000, 38, 25, -1000, -1000, 147, -1000, -1000, -+ 22, 22, -1000, -1000, -1000, - } - var exprPgo = [...]int{ - -- 0, 148, 147, 3, 0, 5, 48, 7, 4, 146, -- 2, 145, 138, 1, 120, -+ 0, 175, 135, 3, 0, 5, 7, 8, 4, 174, -+ 2, 173, 171, 1, 170, 169, - } - var exprR1 = [...]int{ - -- 0, 1, 2, 2, 2, 6, 6, 6, 6, 6, -- 7, 7, 7, 7, 7, 10, 13, 13, 13, 13, -- 13, 13, 13, 13, 13, 13, 3, 3, 3, 3, -- 12, 12, 12, 9, 9, 8, 8, 8, 8, 14, -- 14, 14, 14, 14, 14, 14, 14, 14, 11, 11, -- 5, 5, 4, 4, -+ 0, 1, 2, 2, 2, 2, 2, 6, 6, 6, -+ 6, 6, 7, 7, 7, 7, 7, 10, 13, 13, -+ 13, 13, 13, 13, 13, 13, 13, 13, 3, 3, -+ 3, 3, 12, 12, 12, 9, 9, 8, 8, 8, -+ 8, 15, 15, 15, 15, 15, 15, 15, 15, 15, -+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 11, -+ 11, 5, 5, 4, 4, - } - var exprR2 = [...]int{ - -- 0, 1, 1, 1, 1, 1, 3, 3, 3, 2, -- 2, 3, 3, 3, 2, 4, 4, 4, 5, 5, -- 5, 5, 6, 7, 6, 7, 1, 1, 1, 1, -- 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, -+ 0, 1, 1, 1, 1, 1, 3, 1, 3, 3, -+ 3, 2, 2, 3, 3, 3, 2, 4, 4, 4, -+ 5, 5, 5, 5, 6, 7, 6, 7, 1, 1, -+ 1, 1, 3, 3, 3, 1, 3, 3, 3, 3, -+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -- 1, 3, 4, 4, -+ 1, 1, 3, 4, 4, - } - var exprChk = [...]int{ - -- -1000, -1, -2, -6, -10, -13, -12, 21, -11, -14, -- 13, 25, 26, 27, 28, 31, 29, 30, 32, 33, -- 34, 35, -3, 2, 19, 20, 12, 10, -6, 21, -- 21, -4, 23, 24, -9, 2, -8, 4, 5, 2, -- 22, -7, -6, 21, -10, -13, 4, 21, 21, 21, -- 14, 2, 17, 14, 9, 10, 11, 12, -3, 2, -- 22, 6, -6, -7, 22, 22, 17, -10, -13, -5, -- 4, -5, -8, 5, 5, 5, 5, 5, 2, 22, -- -4, -4, -13, -10, 22, 22, 17, 22, 22, 22, -- 22, 4, -4, -4, -+ -1000, -1, -2, -6, -10, -13, -15, 21, -12, -11, -+ -14, 13, 25, 26, 27, 28, 31, 29, 30, 32, -+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, -+ 43, 44, -3, 2, 19, 20, 12, 10, -2, -6, -+ 21, 21, -4, 23, 24, -9, 2, -8, 4, -2, -+ -2, -2, -2, -2, -2, -2, -2, -2, 5, 2, -+ 22, 22, -7, -6, 21, -10, -13, 4, 21, 21, -+ 21, 14, 2, 17, 14, 9, 10, 11, 12, -3, -+ 2, 22, 6, -6, -7, 22, 22, 17, -10, -13, -+ -5, 4, -5, -8, 5, 5, 5, 5, 5, 2, -+ 22, -4, -4, -13, -10, 22, 22, 17, 22, 22, -+ 22, 22, 4, -4, -4, - } - var exprDef = [...]int{ - -- 0, -2, 1, -2, 3, 4, 5, 0, 0, 0, -- 0, 48, 49, 39, 40, 41, 42, 43, 44, 45, -- 46, 47, 0, 9, 26, 27, 28, 29, 0, 0, -- 0, 0, 0, 0, 0, 0, 33, 0, 6, 8, -- 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, -- 30, 31, 0, 32, 0, 0, 0, 0, 0, 14, -- 15, 10, 0, 0, 16, 17, 0, 0, 0, 0, -- 50, 0, 34, 35, 36, 37, 38, 11, 13, 12, -- 20, 21, 0, 0, 18, 19, 0, 52, 53, 22, -- 24, 
51, 23, 25, -+ 0, -2, 1, -2, 3, 4, 5, 0, 7, 0, -+ 0, 0, 59, 60, 50, 51, 52, 53, 54, 55, -+ 56, 57, 58, 0, 0, 0, 0, 0, 0, 0, -+ 0, 0, 0, 11, 28, 29, 30, 31, 0, -2, -+ 0, 0, 0, 0, 0, 0, 0, 35, 0, 41, -+ 42, 43, 44, 45, 46, 47, 48, 49, 8, 10, -+ 6, 9, 0, 0, 0, 0, 0, 0, 0, 0, -+ 0, 32, 33, 0, 34, 0, 0, 0, 0, 0, -+ 16, 17, 12, 0, 0, 18, 19, 0, 0, 0, -+ 0, 61, 0, 36, 37, 38, 39, 40, 13, 15, -+ 14, 22, 23, 0, 0, 20, 21, 0, 63, 64, -+ 24, 26, 62, 25, 27, - } - var exprTok1 = [...]int{ - -@@ -209,7 +264,8 @@ var exprTok2 = [...]int{ - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -- 32, 33, 34, 35, -+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, -+ 42, 43, 44, - } - var exprTok3 = [...]int{ - 0, -@@ -574,223 +630,278 @@ exprdefault: - case 5: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.LogExpr = newMatcherExpr(exprDollar[1].Selector) -+ exprVAL.Expr = exprDollar[1].BinOpExpr - } - case 6: - exprDollar = exprS[exprpt-3 : exprpt+1] - { -- exprVAL.LogExpr = NewFilterExpr(exprDollar[1].LogExpr, exprDollar[2].Filter, exprDollar[3].str) -+ exprVAL.Expr = exprDollar[2].Expr - } - case 7: -+ exprDollar = exprS[exprpt-1 : exprpt+1] -+ { -+ exprVAL.LogExpr = newMatcherExpr(exprDollar[1].Selector) -+ } -+ case 8: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.LogExpr = NewFilterExpr(exprDollar[1].LogExpr, exprDollar[2].Filter, exprDollar[3].str) -+ } -+ case 9: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogExpr = exprDollar[2].LogExpr - } -- case 10: -+ case 12: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(exprDollar[1].LogExpr, exprDollar[2].duration) - } -- case 11: -+ case 13: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = addFilterToLogRangeExpr(exprDollar[1].LogRangeExpr, exprDollar[2].Filter, exprDollar[3].str) - } -- case 12: -+ case 14: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = exprDollar[2].LogRangeExpr - } -- case 15: -+ case 17: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp) - } -- case 16: -+ case 18: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].RangeAggregationExpr, exprDollar[1].VectorOp, nil, nil) - } -- case 17: -+ case 19: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].VectorAggregationExpr, exprDollar[1].VectorOp, nil, nil) - } -- case 18: -+ case 20: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].RangeAggregationExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil) - } -- case 19: -+ case 21: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].VectorAggregationExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil) - } -- case 20: -+ case 22: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].RangeAggregationExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil) - } -- case 21: -+ case 23: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].VectorAggregationExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil) - } -- case 22: -+ case 24: - 
exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].VectorAggregationExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str) - } -- case 23: -+ case 25: - exprDollar = exprS[exprpt-7 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].VectorAggregationExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str) - } -- case 24: -+ case 26: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].RangeAggregationExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str) - } -- case 25: -+ case 27: - exprDollar = exprS[exprpt-7 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].RangeAggregationExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str) - } -- case 26: -+ case 28: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = labels.MatchRegexp - } -- case 27: -+ case 29: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = labels.MatchEqual - } -- case 28: -+ case 30: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = labels.MatchNotRegexp - } -- case 29: -+ case 31: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = labels.MatchNotEqual - } -- case 30: -+ case 32: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Selector = exprDollar[2].Matchers - } -- case 31: -+ case 33: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Selector = exprDollar[2].Matchers - } -- case 32: -+ case 34: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - } -- case 33: -+ case 35: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Matchers = []*labels.Matcher{exprDollar[1].Matcher} - } -- case 34: -+ case 36: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matchers = append(exprDollar[1].Matchers, exprDollar[3].Matcher) - } -- case 35: -+ case 37: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchEqual, exprDollar[1].str, exprDollar[3].str) - } -- case 36: -+ case 38: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchNotEqual, exprDollar[1].str, exprDollar[3].str) - } -- case 37: -+ case 39: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchRegexp, exprDollar[1].str, exprDollar[3].str) - } -- case 38: -+ case 40: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchNotRegexp, exprDollar[1].str, exprDollar[3].str) - } -- case 39: -+ case 41: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""or"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 42: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""and"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 43: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""unless"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 44: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""+"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 45: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""-"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 46: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""*"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } 
-+ case 47: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""/"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 48: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""%"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 49: -+ exprDollar = exprS[exprpt-3 : exprpt+1] -+ { -+ exprVAL.BinOpExpr = mustNewBinOpExpr(""^"", exprDollar[1].Expr, exprDollar[3].Expr) -+ } -+ case 50: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeSum - } -- case 40: -+ case 51: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeAvg - } -- case 41: -+ case 52: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeCount - } -- case 42: -+ case 53: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeMax - } -- case 43: -+ case 54: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeMin - } -- case 44: -+ case 55: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeStddev - } -- case 45: -+ case 56: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeStdvar - } -- case 46: -+ case 57: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeBottomK - } -- case 47: -+ case 58: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeTopK - } -- case 48: -+ case 59: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpTypeCountOverTime - } -- case 49: -+ case 60: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpTypeRate - } -- case 50: -+ case 61: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Labels = []string{exprDollar[1].str} - } -- case 51: -+ case 62: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Labels = append(exprDollar[1].Labels, exprDollar[3].str) - } -- case 52: -+ case 63: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.Grouping = &grouping{without: false, groups: exprDollar[3].Labels} - } -- case 53: -+ case 64: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.Grouping = &grouping{without: true, groups: exprDollar[3].Labels} -diff --git a/pkg/logql/lex.go b/pkg/logql/lex.go -index d368e7b69e9aa..eedc0dbc9b572 100644 ---- a/pkg/logql/lex.go -+++ b/pkg/logql/lex.go -@@ -36,6 +36,17 @@ var tokens = map[string]int{ - OpTypeStdvar: STDVAR, - OpTypeBottomK: BOTTOMK, - OpTypeTopK: TOPK, -+ -+ // binops -+ OpTypeOr: OR, -+ OpTypeAnd: AND, -+ OpTypeUnless: UNLESS, -+ OpTypeAdd: ADD, -+ OpTypeSub: SUB, -+ OpTypeMul: MUL, -+ OpTypeDiv: DIV, -+ OpTypeMod: MOD, -+ OpTypePow: POW, - } - - type lexer struct { -diff --git a/pkg/logql/parser_test.go b/pkg/logql/parser_test.go -index 97b5b3742783d..a2c9d0103427f 100644 ---- a/pkg/logql/parser_test.go -+++ b/pkg/logql/parser_test.go -@@ -534,7 +534,7 @@ func TestParse(t *testing.T) { - { - in: `{foo=""bar""} ""foo""`, - err: ParseError{ -- msg: ""syntax error: unexpected STRING, expecting != or !~ or |~ or |="", -+ msg: ""syntax error: unexpected STRING"", - line: 1, - col: 13, - }, -@@ -542,11 +542,158 @@ func TestParse(t *testing.T) { - { - in: `{foo=""bar""} foo`, - err: ParseError{ -- msg: ""syntax error: unexpected IDENTIFIER, expecting != or !~ or |~ or |="", -+ msg: ""syntax error: unexpected IDENTIFIER"", - line: 1, - col: 13, - }, - }, -+ { -+ in: ` -+ sum(count_over_time({foo=""bar""}[5m])) by (foo) ^ -+ sum(count_over_time({foo=""bar""}[5m])) by (foo) / -+ sum(count_over_time({foo=""bar""}[5m])) by (foo) -+ `, -+ exp: 
mustNewBinOpExpr( -+ OpTypeDiv, -+ mustNewBinOpExpr( -+ OpTypePow, -+ mustNewVectorAggregationExpr(newRangeAggregationExpr( -+ &logRange{ -+ left: &matchersExpr{ -+ matchers: []*labels.Matcher{ -+ mustNewMatcher(labels.MatchEqual, ""foo"", ""bar""), -+ }, -+ }, -+ interval: 5 * time.Minute, -+ }, OpTypeCountOverTime), -+ ""sum"", -+ &grouping{ -+ without: false, -+ groups: []string{""foo""}, -+ }, -+ nil, -+ ), -+ mustNewVectorAggregationExpr(newRangeAggregationExpr( -+ &logRange{ -+ left: &matchersExpr{ -+ matchers: []*labels.Matcher{ -+ mustNewMatcher(labels.MatchEqual, ""foo"", ""bar""), -+ }, -+ }, -+ interval: 5 * time.Minute, -+ }, OpTypeCountOverTime), -+ ""sum"", -+ &grouping{ -+ without: false, -+ groups: []string{""foo""}, -+ }, -+ nil, -+ ), -+ ), -+ mustNewVectorAggregationExpr(newRangeAggregationExpr( -+ &logRange{ -+ left: &matchersExpr{ -+ matchers: []*labels.Matcher{ -+ mustNewMatcher(labels.MatchEqual, ""foo"", ""bar""), -+ }, -+ }, -+ interval: 5 * time.Minute, -+ }, OpTypeCountOverTime), -+ ""sum"", -+ &grouping{ -+ without: false, -+ groups: []string{""foo""}, -+ }, -+ nil, -+ ), -+ ), -+ }, -+ { -+ // operator precedence before left associativity -+ in: ` -+ sum(count_over_time({foo=""bar""}[5m])) by (foo) + -+ sum(count_over_time({foo=""bar""}[5m])) by (foo) / -+ sum(count_over_time({foo=""bar""}[5m])) by (foo) -+ `, -+ exp: mustNewBinOpExpr( -+ OpTypeAdd, -+ mustNewVectorAggregationExpr(newRangeAggregationExpr( -+ &logRange{ -+ left: &matchersExpr{ -+ matchers: []*labels.Matcher{ -+ mustNewMatcher(labels.MatchEqual, ""foo"", ""bar""), -+ }, -+ }, -+ interval: 5 * time.Minute, -+ }, OpTypeCountOverTime), -+ ""sum"", -+ &grouping{ -+ without: false, -+ groups: []string{""foo""}, -+ }, -+ nil, -+ ), -+ mustNewBinOpExpr( -+ OpTypeDiv, -+ mustNewVectorAggregationExpr(newRangeAggregationExpr( -+ &logRange{ -+ left: &matchersExpr{ -+ matchers: []*labels.Matcher{ -+ mustNewMatcher(labels.MatchEqual, ""foo"", ""bar""), -+ }, -+ }, -+ interval: 5 * time.Minute, -+ }, OpTypeCountOverTime), -+ ""sum"", -+ &grouping{ -+ without: false, -+ groups: []string{""foo""}, -+ }, -+ nil, -+ ), -+ mustNewVectorAggregationExpr(newRangeAggregationExpr( -+ &logRange{ -+ left: &matchersExpr{ -+ matchers: []*labels.Matcher{ -+ mustNewMatcher(labels.MatchEqual, ""foo"", ""bar""), -+ }, -+ }, -+ interval: 5 * time.Minute, -+ }, OpTypeCountOverTime), -+ ""sum"", -+ &grouping{ -+ without: false, -+ groups: []string{""foo""}, -+ }, -+ nil, -+ ), -+ ), -+ ), -+ }, -+ { -+ in: `{foo=""bar""} + {foo=""bar""}`, -+ err: ParseError{ -+ msg: `unexpected type for left leg of binary operation (+): *logql.matchersExpr`, -+ line: 0, -+ col: 0, -+ }, -+ }, -+ { -+ in: `sum(count_over_time({foo=""bar""}[5m])) by (foo) - {foo=""bar""}`, -+ err: ParseError{ -+ msg: `unexpected type for right leg of binary operation (-): *logql.matchersExpr`, -+ line: 0, -+ col: 0, -+ }, -+ }, -+ { -+ in: `{foo=""bar""} / sum(count_over_time({foo=""bar""}[5m])) by (foo)`, -+ err: ParseError{ -+ msg: `unexpected type for left leg of binary operation (/): *logql.matchersExpr`, -+ line: 0, -+ col: 0, -+ }, -+ }, - } { - t.Run(tc.in, func(t *testing.T) { - ast, err := ParseExpr(tc.in)",unknown,"Binary operators in LogQL (#1662) - -* binops in ast - -* bin op associativity & precedence - -* binOpEvaluator work - -* defers close only if constructed without error - -* tests binary ops - -* more binops - -* updates docs - -* changelog - -* better logql parsing errors for binops - -Signed-off-by: Owen Diehl - -* adds ^ operator" 
-9b1a427c5b97655ad9152b3ff2e31a2a84ba4e8b,2023-03-17 16:14:56,Periklis Tsirakidis,"operator: Add support for memberlist bind network configuration (#8672) - -Co-authored-by: Robert Jacob ",False,"diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md -index 0f99753070beb..ddb0e86cb35d1 100644 ---- a/operator/CHANGELOG.md -+++ b/operator/CHANGELOG.md -@@ -1,5 +1,6 @@ - ## Main - -+- [8672](https://github.com/grafana/loki/pull/8672) **periklis**: Add support for memberlist bind network configuration - - [8748](https://github.com/grafana/loki/pull/8748) **periklis**: Add alertingrule tenant id label for all rules - - [8743](https://github.com/grafana/loki/pull/8743) **periklis**: Add alerting style guide validation - - [8192](https://github.com/grafana/loki/pull/8192) **jotak**: Allow multiple matchers for multi-tenancy with Network tenant (OpenShift) -diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go -index 14ccc41e41711..f9982a7200a8c 100644 ---- a/operator/apis/loki/v1/lokistack_types.go -+++ b/operator/apis/loki/v1/lokistack_types.go -@@ -345,6 +345,59 @@ type ClusterProxy struct { - NoProxy string `json:""noProxy,omitempty""` - } - -+// HashRingType defines the type of hash ring which can be used with the Loki cluster. -+// -+// +kubebuilder:validation:Enum=memberlist -+type HashRingType string -+ -+const ( -+ // HashRingMemberList when using memberlist for the distributed hash ring. -+ HashRingMemberList HashRingType = ""memberlist"" -+) -+ -+// InstanceAddrType defines the type of pod network to use for advertising IPs to the ring. -+// -+// +kubebuilder:validation:Enum=default;podIP -+type InstanceAddrType string -+ -+const ( -+ // InstanceAddrDefault when using the first from any private network interfaces (RFC 1918 and RFC 6598). -+ InstanceAddrDefault InstanceAddrType = ""default"" -+ // InstanceAddrPodIP when using the public pod IP from the cluster's pod network. -+ InstanceAddrPodIP InstanceAddrType = ""podIP"" -+) -+ -+// MemberListSpec defines the configuration for the memberlist based hash ring. -+type MemberListSpec struct { -+ // InstanceAddrType defines the type of address to use to advertise to the ring. -+ // Defaults to the first address from any private network interfaces of the current pod. -+ // Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598) -+ // are not available. 
-+ // -+ // +optional -+ // +kubebuilder:validation:optional -+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={""urn:alm:descriptor:com.tectonic.ui:select:default"",""urn:alm:descriptor:com.tectonic.ui:select:podIP""},displayName=""Instance Address"" -+ InstanceAddrType InstanceAddrType `json:""instanceAddrType,omitempty""` -+} -+ -+// HashRingSpec defines the hash ring configuration -+type HashRingSpec struct { -+ // Type of hash ring implementation that should be used -+ // -+ // +required -+ // +kubebuilder:validation:Required -+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={""urn:alm:descriptor:com.tectonic.ui:select:memberlist""},displayName=""Type"" -+ // +kubebuilder:default:=memberlist -+ Type HashRingType `json:""type""` -+ -+ // MemberList configuration spec -+ // -+ // +optional -+ // +kubebuilder:validation:Optional -+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName=""Memberlist Config"" -+ MemberList *MemberListSpec `json:""memberlist,omitempty""` -+} -+ - // ObjectStorageTLSSpec is the TLS configuration for reaching the object storage endpoint. - type ObjectStorageTLSSpec struct { - // Key is the data key of a ConfigMap containing a CA certificate. -@@ -674,6 +727,13 @@ type LokiStackSpec struct { - // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={""urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small"",""urn:alm:descriptor:com.tectonic.ui:select:1x.small"",""urn:alm:descriptor:com.tectonic.ui:select:1x.medium""},displayName=""LokiStack Size"" - Size LokiStackSizeType `json:""size""` - -+ // HashRing defines the spec for the distributed hash ring configuration. -+ // -+ // +optional -+ // +kubebuilder:validation:Optional -+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors=""urn:alm:descriptor:com.tectonic.ui:advanced"",displayName=""Hash Ring"" -+ HashRing *HashRingSpec `json:""hashRing,omitempty""` -+ - // Storage defines the spec for the object storage endpoint to store logs. - // - // +required -diff --git a/operator/apis/loki/v1/zz_generated.deepcopy.go b/operator/apis/loki/v1/zz_generated.deepcopy.go -index b9980637109c4..1ee5a5518c884 100644 ---- a/operator/apis/loki/v1/zz_generated.deepcopy.go -+++ b/operator/apis/loki/v1/zz_generated.deepcopy.go -@@ -441,6 +441,26 @@ func (in *ClusterProxy) DeepCopy() *ClusterProxy { - return out - } - -+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -+func (in *HashRingSpec) DeepCopyInto(out *HashRingSpec) { -+ *out = *in -+ if in.MemberList != nil { -+ in, out := &in.MemberList, &out.MemberList -+ *out = new(MemberListSpec) -+ **out = **in -+ } -+} -+ -+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashRingSpec. -+func (in *HashRingSpec) DeepCopy() *HashRingSpec { -+ if in == nil { -+ return nil -+ } -+ out := new(HashRingSpec) -+ in.DeepCopyInto(out) -+ return out -+} -+ - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. - func (in *IngestionLimitSpec) DeepCopyInto(out *IngestionLimitSpec) { - *out = *in -@@ -739,6 +759,11 @@ func (in *LokiStackList) DeepCopyObject() runtime.Object { - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
- func (in *LokiStackSpec) DeepCopyInto(out *LokiStackSpec) { - *out = *in -+ if in.HashRing != nil { -+ in, out := &in.HashRing, &out.HashRing -+ *out = new(HashRingSpec) -+ (*in).DeepCopyInto(*out) -+ } - in.Storage.DeepCopyInto(&out.Storage) - if in.Proxy != nil { - in, out := &in.Proxy, &out.Proxy -@@ -876,6 +901,21 @@ func (in *LokiTemplateSpec) DeepCopy() *LokiTemplateSpec { - return out - } - -+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -+func (in *MemberListSpec) DeepCopyInto(out *MemberListSpec) { -+ *out = *in -+} -+ -+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberListSpec. -+func (in *MemberListSpec) DeepCopy() *MemberListSpec { -+ if in == nil { -+ return nil -+ } -+ out := new(MemberListSpec) -+ in.DeepCopyInto(out) -+ return out -+} -+ - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. - func (in *OIDCSpec) DeepCopyInto(out *OIDCSpec) { - *out = *in -diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml -index cdc11e5bb3147..1821bffa7c1f1 100644 ---- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml -+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml -@@ -150,7 +150,7 @@ metadata: - categories: OpenShift Optional, Logging & Tracing - certified: ""false"" - containerImage: docker.io/grafana/loki-operator:main-39f2856 -- createdAt: ""2023-03-15T10:50:46Z"" -+ createdAt: ""2023-03-17T05:07:17Z"" - description: The Community Loki Operator provides Kubernetes native deployment - and management of Loki and related logging components. - operators.operatorframework.io/builder: operator-sdk-unknown -@@ -257,6 +257,28 @@ spec: - name: """" - version: v1 - specDescriptors: -+ - description: HashRing defines the spec for the distributed hash ring configuration. -+ displayName: Hash Ring -+ path: hashRing -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:advanced -+ - description: MemberList configuration spec -+ displayName: Memberlist Config -+ path: hashRing.memberlist -+ - description: InstanceAddrType defines the type of address to use to advertise -+ to the ring. Defaults to the first address from any private network interfaces -+ of the current pod. Alternatively the public pod IP can be used in case -+ private networks (RFC 1918 and RFC 6598) are not available. -+ displayName: Instance Address -+ path: hashRing.memberlist.instanceAddrType -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:default -+ - urn:alm:descriptor:com.tectonic.ui:select:podIP -+ - description: Type of hash ring implementation that should be used -+ displayName: Type -+ path: hashRing.type -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:memberlist - - description: Limits defines the limits to be applied to log stream processing. - displayName: Rate Limiting - path: limits -diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml -index ce28b3e0dab80..aec068b659e09 100644 ---- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml -+++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml -@@ -54,6 +54,33 @@ spec: - spec: - description: LokiStack CR spec field. 
- properties: -+ hashRing: -+ description: HashRing defines the spec for the distributed hash ring -+ configuration. -+ properties: -+ memberlist: -+ description: MemberList configuration spec -+ properties: -+ instanceAddrType: -+ description: InstanceAddrType defines the type of address -+ to use to advertise to the ring. Defaults to the first address -+ from any private network interfaces of the current pod. -+ Alternatively the public pod IP can be used in case private -+ networks (RFC 1918 and RFC 6598) are not available. -+ enum: -+ - default -+ - podIP -+ type: string -+ type: object -+ type: -+ default: memberlist -+ description: Type of hash ring implementation that should be used -+ enum: -+ - memberlist -+ type: string -+ required: -+ - type -+ type: object - limits: - description: Limits defines the limits to be applied to log stream - processing. -diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -index 5094e5ba191f6..6a9d3b97d27d8 100644 ---- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -@@ -150,7 +150,7 @@ metadata: - categories: OpenShift Optional, Logging & Tracing - certified: ""false"" - containerImage: quay.io/openshift-logging/loki-operator:v0.1.0 -- createdAt: ""2023-03-15T10:50:48Z"" -+ createdAt: ""2023-03-17T05:07:19Z"" - description: | - The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. - ## Prerequisites and Requirements -@@ -270,6 +270,28 @@ spec: - name: """" - version: v1 - specDescriptors: -+ - description: HashRing defines the spec for the distributed hash ring configuration. -+ displayName: Hash Ring -+ path: hashRing -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:advanced -+ - description: MemberList configuration spec -+ displayName: Memberlist Config -+ path: hashRing.memberlist -+ - description: InstanceAddrType defines the type of address to use to advertise -+ to the ring. Defaults to the first address from any private network interfaces -+ of the current pod. Alternatively the public pod IP can be used in case -+ private networks (RFC 1918 and RFC 6598) are not available. -+ displayName: Instance Address -+ path: hashRing.memberlist.instanceAddrType -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:default -+ - urn:alm:descriptor:com.tectonic.ui:select:podIP -+ - description: Type of hash ring implementation that should be used -+ displayName: Type -+ path: hashRing.type -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:memberlist - - description: Limits defines the limits to be applied to log stream processing. - displayName: Rate Limiting - path: limits -diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml -index ac754b4e7f8a2..db9ccfab7a577 100644 ---- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml -+++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml -@@ -54,6 +54,33 @@ spec: - spec: - description: LokiStack CR spec field. - properties: -+ hashRing: -+ description: HashRing defines the spec for the distributed hash ring -+ configuration. 
-+ properties: -+ memberlist: -+ description: MemberList configuration spec -+ properties: -+ instanceAddrType: -+ description: InstanceAddrType defines the type of address -+ to use to advertise to the ring. Defaults to the first address -+ from any private network interfaces of the current pod. -+ Alternatively the public pod IP can be used in case private -+ networks (RFC 1918 and RFC 6598) are not available. -+ enum: -+ - default -+ - podIP -+ type: string -+ type: object -+ type: -+ default: memberlist -+ description: Type of hash ring implementation that should be used -+ enum: -+ - memberlist -+ type: string -+ required: -+ - type -+ type: object - limits: - description: Limits defines the limits to be applied to log stream - processing. -diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml -index af8a1603b4dde..213a070799c9e 100644 ---- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml -+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml -@@ -37,6 +37,33 @@ spec: - spec: - description: LokiStack CR spec field. - properties: -+ hashRing: -+ description: HashRing defines the spec for the distributed hash ring -+ configuration. -+ properties: -+ memberlist: -+ description: MemberList configuration spec -+ properties: -+ instanceAddrType: -+ description: InstanceAddrType defines the type of address -+ to use to advertise to the ring. Defaults to the first address -+ from any private network interfaces of the current pod. -+ Alternatively the public pod IP can be used in case private -+ networks (RFC 1918 and RFC 6598) are not available. -+ enum: -+ - default -+ - podIP -+ type: string -+ type: object -+ type: -+ default: memberlist -+ description: Type of hash ring implementation that should be used -+ enum: -+ - memberlist -+ type: string -+ required: -+ - type -+ type: object - limits: - description: Limits defines the limits to be applied to log stream - processing. -diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml -index 0f54d643ea172..15857ea949044 100644 ---- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml -+++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml -@@ -170,6 +170,28 @@ spec: - name: """" - version: v1 - specDescriptors: -+ - description: HashRing defines the spec for the distributed hash ring configuration. -+ displayName: Hash Ring -+ path: hashRing -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:advanced -+ - description: MemberList configuration spec -+ displayName: Memberlist Config -+ path: hashRing.memberlist -+ - description: InstanceAddrType defines the type of address to use to advertise -+ to the ring. Defaults to the first address from any private network interfaces -+ of the current pod. Alternatively the public pod IP can be used in case -+ private networks (RFC 1918 and RFC 6598) are not available. 
-+ displayName: Instance Address -+ path: hashRing.memberlist.instanceAddrType -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:default -+ - urn:alm:descriptor:com.tectonic.ui:select:podIP -+ - description: Type of hash ring implementation that should be used -+ displayName: Type -+ path: hashRing.type -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:memberlist - - description: Limits defines the limits to be applied to log stream processing. - displayName: Rate Limiting - path: limits -diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml -index 5988af0d89253..15ee9e8466eff 100644 ---- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml -+++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml -@@ -182,6 +182,28 @@ spec: - name: """" - version: v1 - specDescriptors: -+ - description: HashRing defines the spec for the distributed hash ring configuration. -+ displayName: Hash Ring -+ path: hashRing -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:advanced -+ - description: MemberList configuration spec -+ displayName: Memberlist Config -+ path: hashRing.memberlist -+ - description: InstanceAddrType defines the type of address to use to advertise -+ to the ring. Defaults to the first address from any private network interfaces -+ of the current pod. Alternatively the public pod IP can be used in case -+ private networks (RFC 1918 and RFC 6598) are not available. -+ displayName: Instance Address -+ path: hashRing.memberlist.instanceAddrType -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:default -+ - urn:alm:descriptor:com.tectonic.ui:select:podIP -+ - description: Type of hash ring implementation that should be used -+ displayName: Type -+ path: hashRing.type -+ x-descriptors: -+ - urn:alm:descriptor:com.tectonic.ui:select:memberlist - - description: Limits defines the limits to be applied to log stream processing. - displayName: Rate Limiting - path: limits -diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md -index 8ca30892ff7c7..d504a742dd7a5 100644 ---- a/operator/docs/operator/api.md -+++ b/operator/docs/operator/api.md -@@ -941,6 +941,72 @@ string - - - -+## HashRingSpec { #loki-grafana-com-v1-HashRingSpec } -+

-+(Appears on: LokiStackSpec)
-+HashRingSpec defines the hash ring configuration.
-+  type (HashRingType): Type of hash ring implementation that should be used.
-+  memberlist (MemberListSpec, optional): MemberList configuration spec.
-+## HashRingType { #loki-grafana-com-v1-HashRingType } (string alias)
-+(Appears on: HashRingSpec)
-+HashRingType defines the type of hash ring which can be used with the Loki cluster.
-+  ""memberlist"": HashRingMemberList when using memberlist for the distributed hash ring.
-+## InstanceAddrType { #loki-grafana-com-v1-InstanceAddrType } (string alias)
-+(Appears on: MemberListSpec)
-+InstanceAddrType defines the type of pod network to use for advertising IPs to the ring.
-+  ""default"": InstanceAddrDefault when using the first from any private network interfaces (RFC 1918 and RFC 6598).
-+  ""podIP"": InstanceAddrPodIP when using the public pod IP from the cluster's pod network.
-+## LokiStackSpec (new field)
-+  hashRing (HashRingSpec, optional): HashRing defines the spec for the distributed hash ring configuration.
-+## MemberListSpec { #loki-grafana-com-v1-MemberListSpec }
-+(Appears on: HashRingSpec)
-+MemberListSpec defines the configuration for the memberlist based hash ring.
-+  instanceAddrType (InstanceAddrType, optional): InstanceAddrType defines the type of address to use to advertise
-+    to the ring. Defaults to the first address from any private network interfaces of the current pod. Alternatively
-+    the public pod IP can be used in case private networks (RFC 1918 and RFC 6598) are not available.

-+
-+ - ## ModeType { #loki-grafana-com-v1-ModeType } - (string alias) -

-diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go -index 17b91f0242552..f619466b9bfe5 100644 ---- a/operator/internal/manifests/compactor.go -+++ b/operator/internal/manifests/compactor.go -@@ -43,6 +43,10 @@ func BuildCompactor(opts Options) ([]client.Object, error) { - } - } - -+ if err := configureHashRingEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { -+ return nil, err -+ } -+ - if err := configureProxyEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { - return nil, err - } -@@ -83,6 +87,7 @@ func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet { - ""-target=compactor"", - fmt.Sprintf(""-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), - fmt.Sprintf(""-runtime-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), -+ ""-config.expand-env=true"", - }, - ReadinessProbe: lokiReadinessProbe(), - LivenessProbe: lokiLivenessProbe(), -diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go -index c27373192da7e..4a2e1596e2734 100644 ---- a/operator/internal/manifests/config.go -+++ b/operator/internal/manifests/config.go -@@ -143,10 +143,7 @@ func ConfigOptions(opt Options) config.Options { - FQDN: fqdn(NewQueryFrontendGRPCService(opt).GetName(), opt.Namespace), - Port: grpcPort, - }, -- GossipRing: config.Address{ -- FQDN: fqdn(BuildLokiGossipRingService(opt.Name).GetName(), opt.Namespace), -- Port: gossipPort, -- }, -+ GossipRing: gossipRingConfig(opt.Name, opt.Namespace, opt.Stack.HashRing), - Querier: config.Address{ - Protocol: protocol, - FQDN: fqdn(NewQuerierHTTPService(opt).GetName(), opt.Namespace), -@@ -246,6 +243,27 @@ func alertManagerConfig(spec *lokiv1.AlertManagerSpec) *config.AlertManagerConfi - return conf - } - -+func gossipRingConfig(stackName, stackNs string, spec *lokiv1.HashRingSpec) config.GossipRing { -+ var instanceAddr string -+ if spec != nil && spec.Type == lokiv1.HashRingMemberList && spec.MemberList != nil { -+ switch spec.MemberList.InstanceAddrType { -+ case lokiv1.InstanceAddrPodIP: -+ instanceAddr = fmt.Sprintf(""${%s}"", gossipInstanceAddrEnvVarName) -+ case lokiv1.InstanceAddrDefault: -+ // Do nothing use loki defaults -+ default: -+ // Do nothing use loki defaults -+ } -+ } -+ -+ return config.GossipRing{ -+ InstanceAddr: instanceAddr, -+ InstancePort: grpcPort, -+ BindPort: gossipPort, -+ MembersDiscoveryAddr: fqdn(BuildLokiGossipRingService(stackName).GetName(), stackNs), -+ } -+} -+ - func remoteWriteConfig(s *lokiv1.RemoteWriteSpec, rs *RulerSecret) *config.RemoteWriteConfig { - if s == nil || rs == nil { - return nil -diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go -index 51bea714fd491..f2b7cb129e97e 100644 ---- a/operator/internal/manifests/config_test.go -+++ b/operator/internal/manifests/config_test.go -@@ -182,6 +182,84 @@ func randomConfigOptions() manifests.Options { - } - } - -+func TestConfigOptions_GossipRingConfig(t *testing.T) { -+ tt := []struct { -+ desc string -+ spec lokiv1.LokiStackSpec -+ wantOptions config.GossipRing -+ }{ -+ { -+ desc: ""defaults"", -+ spec: lokiv1.LokiStackSpec{}, -+ wantOptions: config.GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""my-stack-gossip-ring.my-ns.svc.cluster.local"", -+ }, -+ }, -+ { -+ desc: ""defaults with empty config"", -+ spec: lokiv1.LokiStackSpec{ -+ HashRing: &lokiv1.HashRingSpec{ -+ Type: lokiv1.HashRingMemberList, -+ }, -+ }, -+ 
wantOptions: config.GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""my-stack-gossip-ring.my-ns.svc.cluster.local"", -+ }, -+ }, -+ { -+ desc: ""user selected any instance addr"", -+ spec: lokiv1.LokiStackSpec{ -+ HashRing: &lokiv1.HashRingSpec{ -+ Type: lokiv1.HashRingMemberList, -+ MemberList: &lokiv1.MemberListSpec{ -+ InstanceAddrType: lokiv1.InstanceAddrDefault, -+ }, -+ }, -+ }, -+ wantOptions: config.GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""my-stack-gossip-ring.my-ns.svc.cluster.local"", -+ }, -+ }, -+ { -+ desc: ""user selected podIP instance addr"", -+ spec: lokiv1.LokiStackSpec{ -+ HashRing: &lokiv1.HashRingSpec{ -+ Type: lokiv1.HashRingMemberList, -+ MemberList: &lokiv1.MemberListSpec{ -+ InstanceAddrType: lokiv1.InstanceAddrPodIP, -+ }, -+ }, -+ }, -+ wantOptions: config.GossipRing{ -+ InstanceAddr: ""${HASH_RING_INSTANCE_ADDR}"", -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""my-stack-gossip-ring.my-ns.svc.cluster.local"", -+ }, -+ }, -+ } -+ for _, tc := range tt { -+ tc := tc -+ t.Run(tc.desc, func(t *testing.T) { -+ t.Parallel() -+ -+ inOpt := manifests.Options{ -+ Name: ""my-stack"", -+ Namespace: ""my-ns"", -+ Stack: tc.spec, -+ } -+ options := manifests.ConfigOptions(inOpt) -+ require.Equal(t, tc.wantOptions, options.GossipRing) -+ }) -+ } -+} -+ - func TestConfigOptions_RetentionConfig(t *testing.T) { - tt := []struct { - desc string -diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go -index c8e53c8f5518f..2a4f1c328384b 100644 ---- a/operator/internal/manifests/distributor.go -+++ b/operator/internal/manifests/distributor.go -@@ -37,6 +37,10 @@ func BuildDistributor(opts Options) ([]client.Object, error) { - } - } - -+ if err := configureHashRingEnv(&deployment.Spec.Template.Spec, opts); err != nil { -+ return nil, err -+ } -+ - if err := configureProxyEnv(&deployment.Spec.Template.Spec, opts); err != nil { - return nil, err - } -@@ -77,6 +81,7 @@ func NewDistributorDeployment(opts Options) *appsv1.Deployment { - ""-target=distributor"", - fmt.Sprintf(""-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), - fmt.Sprintf(""-runtime-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), -+ ""-config.expand-env=true"", - }, - ReadinessProbe: lokiReadinessProbe(), - LivenessProbe: lokiLivenessProbe(), -diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go -index ca4b8b5884b54..0c31ab4528f7a 100644 ---- a/operator/internal/manifests/indexgateway.go -+++ b/operator/internal/manifests/indexgateway.go -@@ -43,6 +43,10 @@ func BuildIndexGateway(opts Options) ([]client.Object, error) { - } - } - -+ if err := configureHashRingEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { -+ return nil, err -+ } -+ - if err := configureProxyEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { - return nil, err - } -@@ -83,6 +87,7 @@ func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet { - ""-target=index-gateway"", - fmt.Sprintf(""-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), - fmt.Sprintf(""-runtime-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), -+ ""-config.expand-env=true"", - }, - ReadinessProbe: lokiReadinessProbe(), - LivenessProbe: lokiLivenessProbe(), -diff --git a/operator/internal/manifests/ingester.go 
b/operator/internal/manifests/ingester.go -index 5a842a3c28046..38518f54b8df1 100644 ---- a/operator/internal/manifests/ingester.go -+++ b/operator/internal/manifests/ingester.go -@@ -43,6 +43,10 @@ func BuildIngester(opts Options) ([]client.Object, error) { - } - } - -+ if err := configureHashRingEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { -+ return nil, err -+ } -+ - if err := configureProxyEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { - return nil, err - } -@@ -83,6 +87,7 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet { - ""-target=ingester"", - fmt.Sprintf(""-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), - fmt.Sprintf(""-runtime-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), -+ ""-config.expand-env=true"", - }, - ReadinessProbe: lokiReadinessProbe(), - LivenessProbe: lokiLivenessProbe(), -diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go -index 99e52891f21de..974734c975c36 100644 ---- a/operator/internal/manifests/internal/config/build_test.go -+++ b/operator/internal/manifests/internal/config/build_test.go -@@ -29,6 +29,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -51,12 +57,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -200,9 +204,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -264,6 +269,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -286,12 +297,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -466,9 +475,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -540,9 +550,10 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) { - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 
9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -603,6 +614,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -625,12 +642,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -828,9 +843,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -939,6 +955,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -961,12 +983,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -1164,9 +1184,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -1276,6 +1297,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -1298,12 +1325,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -1514,9 +1539,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ 
-1643,6 +1669,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -1668,12 +1700,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -1884,9 +1914,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -1955,6 +1986,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -1977,12 +2014,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -2206,9 +2241,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -2352,6 +2388,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 2h - working_directory: /tmp/loki/compactor -@@ -2388,12 +2430,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -2612,9 +2652,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -2676,6 +2717,12 @@ common: - secret_access_key: test123 - s3forcepathstyle: true - compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_port: 9095 - compactor: - compaction_interval: 
2h - working_directory: /tmp/loki/compactor -@@ -2698,12 +2745,10 @@ ingester: - flush_op_timeout: 10m - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: 1 -- heartbeat_timeout: 1m - max_chunk_age: 2h - max_transfer_retries: 0 - wal: -@@ -2955,9 +3000,10 @@ overrides: - FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", - Port: 9095, - }, -- GossipRing: Address{ -- FQDN: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -- Port: 7946, -+ GossipRing: GossipRing{ -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", - }, - Querier: Address{ - Protocol: ""http"", -@@ -3130,3 +3176,245 @@ overrides: - require.YAMLEq(t, expCfg, string(cfg)) - require.YAMLEq(t, expRCfg, string(rCfg)) - } -+ -+func TestBuild_ConfigAndRuntimeConfig_WithHashRingSpec(t *testing.T) { -+ expCfg := ` -+--- -+auth_enabled: true -+chunk_store_config: -+ chunk_cache_config: -+ embedded_cache: -+ enabled: true -+ max_size_mb: 500 -+common: -+ storage: -+ s3: -+ s3: http://test.default.svc.cluster.local.:9000 -+ bucketnames: loki -+ region: us-east -+ access_key_id: test -+ secret_access_key: test123 -+ s3forcepathstyle: true -+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ instance_addr: ${HASH_RING_INSTANCE_ADDR} -+ instance_port: 9095 -+compactor: -+ compaction_interval: 2h -+ working_directory: /tmp/loki/compactor -+frontend: -+ tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 -+ compress_responses: true -+ max_outstanding_per_tenant: 256 -+ log_queries_longer_than: 5s -+frontend_worker: -+ frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 -+ grpc_client_config: -+ max_send_msg_size: 104857600 -+ match_max_concurrent: true -+ingester: -+ chunk_block_size: 262144 -+ chunk_encoding: snappy -+ chunk_idle_period: 1h -+ chunk_retain_period: 5m -+ chunk_target_size: 2097152 -+ flush_op_timeout: 10m -+ lifecycler: -+ final_sleep: 0s -+ join_after: 30s -+ num_tokens: 512 -+ ring: -+ replication_factor: 1 -+ max_chunk_age: 2h -+ max_transfer_retries: 0 -+ wal: -+ enabled: true -+ dir: /tmp/wal -+ replay_memory_ceiling: 2500 -+ingester_client: -+ grpc_client_config: -+ max_recv_msg_size: 67108864 -+ remote_timeout: 1s -+# NOTE: Keep the order of keys as in Loki docs -+# to enable easy diffs when vendoring newer -+# Loki releases. -+# (See https://grafana.com/docs/loki/latest/configuration/#limits_config) -+# -+# Values for not exposed fields are taken from the grafana/loki production -+# configuration manifests. -+# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet) -+limits_config: -+ ingestion_rate_strategy: global -+ ingestion_rate_mb: 4 -+ ingestion_burst_size_mb: 6 -+ max_label_name_length: 1024 -+ max_label_value_length: 2048 -+ max_label_names_per_series: 30 -+ reject_old_samples: true -+ reject_old_samples_max_age: 168h -+ creation_grace_period: 10m -+ enforce_metric_name: false -+ # Keep max_streams_per_user always to 0 to default -+ # using max_global_streams_per_user always. 
-+ # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73) -+ max_streams_per_user: 0 -+ max_line_size: 256000 -+ max_entries_limit_per_query: 5000 -+ max_global_streams_per_user: 0 -+ max_chunks_per_query: 2000000 -+ max_query_length: 721h -+ max_query_parallelism: 32 -+ max_query_series: 500 -+ cardinality_limit: 100000 -+ max_streams_matchers_per_query: 1000 -+ max_cache_freshness_per_query: 10m -+ per_stream_rate_limit: 3MB -+ per_stream_rate_limit_burst: 15MB -+ split_queries_by_interval: 30m -+ query_timeout: 1m -+memberlist: -+ abort_if_cluster_join_fails: true -+ bind_port: 7946 -+ join_members: -+ - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946 -+ max_join_backoff: 1m -+ max_join_retries: 10 -+ min_join_backoff: 1s -+querier: -+ engine: -+ max_look_back_period: 30s -+ extra_query_delay: 0s -+ max_concurrent: 2 -+ query_ingesters_within: 3h -+ tail_max_duration: 1h -+query_range: -+ align_queries_with_step: true -+ cache_results: true -+ max_retries: 5 -+ results_cache: -+ cache: -+ embedded_cache: -+ enabled: true -+ max_size_mb: 500 -+ parallelise_shardable_queries: true -+schema_config: -+ configs: -+ - from: ""2020-10-01"" -+ index: -+ period: 24h -+ prefix: index_ -+ object_store: s3 -+ schema: v11 -+ store: boltdb-shipper -+server: -+ graceful_shutdown_timeout: 5s -+ grpc_server_min_time_between_pings: '10s' -+ grpc_server_ping_without_stream_allowed: true -+ grpc_server_max_concurrent_streams: 1000 -+ grpc_server_max_recv_msg_size: 104857600 -+ grpc_server_max_send_msg_size: 104857600 -+ http_listen_port: 3100 -+ http_server_idle_timeout: 120s -+ http_server_write_timeout: 1m -+ log_level: info -+storage_config: -+ boltdb_shipper: -+ active_index_directory: /tmp/loki/index -+ cache_location: /tmp/loki/index_cache -+ cache_ttl: 24h -+ resync_interval: 5m -+ shared_store: s3 -+ index_gateway_client: -+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095 -+tracing: -+ enabled: false -+analytics: -+ reporting_enabled: true -+` -+ expRCfg := ` -+--- -+overrides: -+` -+ opts := Options{ -+ Stack: lokiv1.LokiStackSpec{ -+ ReplicationFactor: 1, -+ Limits: &lokiv1.LimitsSpec{ -+ Global: &lokiv1.LimitsTemplateSpec{ -+ IngestionLimits: &lokiv1.IngestionLimitSpec{ -+ IngestionRate: 4, -+ IngestionBurstSize: 6, -+ MaxLabelNameLength: 1024, -+ MaxLabelValueLength: 2048, -+ MaxLabelNamesPerSeries: 30, -+ MaxGlobalStreamsPerTenant: 0, -+ MaxLineSize: 256000, -+ }, -+ QueryLimits: &lokiv1.QueryLimitSpec{ -+ MaxEntriesLimitPerQuery: 5000, -+ MaxChunksPerQuery: 2000000, -+ MaxQuerySeries: 500, -+ QueryTimeout: ""1m"", -+ }, -+ }, -+ }, -+ }, -+ Namespace: ""test-ns"", -+ Name: ""test"", -+ Compactor: Address{ -+ FQDN: ""loki-compactor-grpc-lokistack-dev.default.svc.cluster.local"", -+ Port: 9095, -+ }, -+ FrontendWorker: Address{ -+ FQDN: ""loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local"", -+ Port: 9095, -+ }, -+ GossipRing: GossipRing{ -+ InstanceAddr: ""${HASH_RING_INSTANCE_ADDR}"", -+ InstancePort: 9095, -+ BindPort: 7946, -+ MembersDiscoveryAddr: ""loki-gossip-ring-lokistack-dev.default.svc.cluster.local"", -+ }, -+ Querier: Address{ -+ Protocol: ""http"", -+ FQDN: ""loki-querier-http-lokistack-dev.default.svc.cluster.local"", -+ Port: 3100, -+ }, -+ IndexGateway: Address{ -+ FQDN: ""loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local"", -+ Port: 9095, -+ }, -+ StorageDirectory: ""/tmp/loki"", -+ MaxConcurrent: MaxConcurrent{ -+ AvailableQuerierCPUCores: 2, -+ }, -+ WriteAheadLog: 
WriteAheadLog{ -+ Directory: ""/tmp/wal"", -+ IngesterMemoryRequest: 5000, -+ }, -+ ObjectStorage: storage.Options{ -+ SharedStore: lokiv1.ObjectStorageSecretS3, -+ S3: &storage.S3StorageConfig{ -+ Endpoint: ""http://test.default.svc.cluster.local.:9000"", -+ Region: ""us-east"", -+ Buckets: ""loki"", -+ AccessKeyID: ""test"", -+ AccessKeySecret: ""test123"", -+ }, -+ Schemas: []lokiv1.ObjectStorageSchema{ -+ { -+ Version: lokiv1.ObjectStorageSchemaV11, -+ EffectiveDate: ""2020-10-01"", -+ }, -+ }, -+ }, -+ EnableRemoteReporting: true, -+ } -+ cfg, rCfg, err := Build(opts) -+ require.NoError(t, err) -+ require.YAMLEq(t, expCfg, string(cfg)) -+ require.YAMLEq(t, expRCfg, string(rCfg)) -+} -diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml -index b2836448cf4ec..e4080170e426c 100644 ---- a/operator/internal/manifests/internal/config/loki-config.yaml -+++ b/operator/internal/manifests/internal/config/loki-config.yaml -@@ -46,6 +46,17 @@ common: - container_name: {{ .Container }} - {{- end }} - compactor_grpc_address: {{ .Compactor.FQDN }}:{{ .Compactor.Port }} -+ {{- with .GossipRing }} -+ ring: -+ kvstore: -+ store: memberlist -+ heartbeat_period: 5s -+ heartbeat_timeout: 1m -+ {{- with .InstanceAddr }} -+ instance_addr: {{ . }} -+ {{- end }} -+ instance_port: {{ .InstancePort }} -+ {{- end }} - compactor: - compaction_interval: 2h - working_directory: {{ .StorageDirectory }}/compactor -@@ -92,12 +103,10 @@ ingester: - max_chunk_age: 2h - lifecycler: - final_sleep: 0s -- heartbeat_period: 5s - join_after: 30s - num_tokens: 512 - ring: - replication_factor: {{ .Stack.ReplicationFactor }} -- heartbeat_timeout: 1m - max_transfer_retries: 0 - wal: - enabled: true -@@ -164,14 +173,16 @@ limits_config: - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB - split_queries_by_interval: 30m -+{{- with .GossipRing }} - memberlist: - abort_if_cluster_join_fails: true -- bind_port: {{ .GossipRing.Port }} -+ bind_port: {{ .BindPort }} - join_members: -- - {{ .GossipRing.FQDN }}:{{ .GossipRing.Port }} -+ - {{ .MembersDiscoveryAddr }}:{{ .BindPort }} - max_join_backoff: 1m - max_join_retries: 10 - min_join_backoff: 1s -+{{- end }} - querier: - engine: - max_look_back_period: 30s -diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go -index 8f4fa2428c8bc..557e7d570616a 100644 ---- a/operator/internal/manifests/internal/config/options.go -+++ b/operator/internal/manifests/internal/config/options.go -@@ -20,7 +20,7 @@ type Options struct { - Name string - Compactor Address - FrontendWorker Address -- GossipRing Address -+ GossipRing GossipRing - Querier Address - IndexGateway Address - Ruler Ruler -@@ -55,6 +55,18 @@ type Address struct { - Port int - } - -+// GossipRing defines the memberlist configuration -+type GossipRing struct { -+ // InstanceAddr is optional, defaults to private networks -+ InstanceAddr string -+ // InstancePort is required -+ InstancePort int -+ // BindPort is the port for listening to gossip messages -+ BindPort int -+ // MembersDiscoveryAddr is required -+ MembersDiscoveryAddr string -+} -+ - // Ruler configuration - type Ruler struct { - Enabled bool -diff --git a/operator/internal/manifests/memberlist.go b/operator/internal/manifests/memberlist.go -index 3a51e69bccc32..bc9377093aeb4 100644 ---- a/operator/internal/manifests/memberlist.go -+++ b/operator/internal/manifests/memberlist.go -@@ -3,10 +3,13 @@ package 
manifests - import ( - ""fmt"" - -- ""k8s.io/apimachinery/pkg/util/intstr"" -+ ""github.com/imdario/mergo"" - - corev1 ""k8s.io/api/core/v1"" - metav1 ""k8s.io/apimachinery/pkg/apis/meta/v1"" -+ ""k8s.io/apimachinery/pkg/util/intstr"" -+ -+ lokiv1 ""github.com/grafana/loki/operator/apis/loki/v1"" - ) - - // BuildLokiGossipRingService creates a k8s service for the gossip/memberlist members of the cluster -@@ -34,3 +37,50 @@ func BuildLokiGossipRingService(stackName string) *corev1.Service { - }, - } - } -+ -+func configureHashRingEnv(p *corev1.PodSpec, opts Options) error { -+ resetProxyVar(p, gossipInstanceAddrEnvVarName) -+ hashRing := opts.Stack.HashRing -+ -+ if hashRing == nil { -+ return nil -+ } -+ -+ if hashRing.Type != lokiv1.HashRingMemberList { -+ return nil -+ } -+ -+ if hashRing.MemberList == nil { -+ return nil -+ } -+ -+ switch hashRing.MemberList.InstanceAddrType { -+ case """", lokiv1.InstanceAddrDefault: -+ return nil -+ default: -+ // Proceed with appending env var -+ } -+ -+ src := corev1.Container{ -+ Env: []corev1.EnvVar{ -+ { -+ Name: gossipInstanceAddrEnvVarName, -+ ValueFrom: &corev1.EnvVarSource{ -+ FieldRef: &corev1.ObjectFieldSelector{ -+ APIVersion: ""v1"", -+ FieldPath: ""status.podIP"", -+ }, -+ }, -+ }, -+ }, -+ } -+ -+ for i, dst := range p.Containers { -+ if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil { -+ return err -+ } -+ p.Containers[i] = dst -+ } -+ -+ return nil -+} -diff --git a/operator/internal/manifests/memberlist_test.go b/operator/internal/manifests/memberlist_test.go -new file mode 100644 -index 0000000000000..56d48b39e928f ---- /dev/null -+++ b/operator/internal/manifests/memberlist_test.go -@@ -0,0 +1,178 @@ -+package manifests -+ -+import ( -+ ""testing"" -+ -+ lokiv1 ""github.com/grafana/loki/operator/apis/loki/v1"" -+ ""github.com/stretchr/testify/require"" -+ v1 ""k8s.io/api/core/v1"" -+) -+ -+func TestConfigureHashRingEnv_UseDefaults_NoHashRingSpec(t *testing.T) { -+ opt := Options{ -+ Name: ""test"", -+ Namespace: ""test"", -+ Image: ""test"", -+ Stack: lokiv1.LokiStackSpec{ -+ Size: lokiv1.SizeOneXExtraSmall, -+ Template: &lokiv1.LokiTemplateSpec{ -+ Compactor: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Distributor: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Ingester: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Querier: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ QueryFrontend: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Gateway: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ IndexGateway: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Ruler: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ }, -+ }, -+ } -+ -+ wantEnvVar := v1.EnvVar{ -+ ValueFrom: &v1.EnvVarSource{ -+ FieldRef: &v1.ObjectFieldSelector{ -+ APIVersion: ""v1"", -+ FieldPath: ""status.podIP"", -+ }, -+ }, -+ } -+ -+ for _, cs := range lokiContainers(t, opt) { -+ for _, c := range cs { -+ require.NotContains(t, c.Env, wantEnvVar, ""contains envVar %s for: %s"", gossipInstanceAddrEnvVarName, c.Name) -+ } -+ } -+} -+ -+func TestConfigureHashRingEnv_UseDefaults_WithCustomHashRingSpec(t *testing.T) { -+ opt := Options{ -+ Name: ""test"", -+ Namespace: ""test"", -+ Image: ""test"", -+ Stack: lokiv1.LokiStackSpec{ -+ Size: lokiv1.SizeOneXExtraSmall, -+ HashRing: &lokiv1.HashRingSpec{ -+ Type: lokiv1.HashRingMemberList, -+ MemberList: &lokiv1.MemberListSpec{ -+ InstanceAddrType: lokiv1.InstanceAddrDefault, -+ }, -+ }, -+ Template: &lokiv1.LokiTemplateSpec{ -+ Compactor: &lokiv1.LokiComponentSpec{ -+ 
Replicas: 1, -+ }, -+ Distributor: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Ingester: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Querier: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ QueryFrontend: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Gateway: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ IndexGateway: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Ruler: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ }, -+ }, -+ } -+ -+ wantEnvVar := v1.EnvVar{ -+ ValueFrom: &v1.EnvVarSource{ -+ FieldRef: &v1.ObjectFieldSelector{ -+ APIVersion: ""v1"", -+ FieldPath: ""status.podIP"", -+ }, -+ }, -+ } -+ -+ for _, cs := range lokiContainers(t, opt) { -+ for _, c := range cs { -+ require.NotContains(t, c.Env, wantEnvVar, ""contains envVar %s for: %s"", gossipInstanceAddrEnvVarName, c.Name) -+ } -+ } -+} -+ -+func TestConfigureHashRingEnv_UseInstanceAddrPodIP(t *testing.T) { -+ opt := Options{ -+ Name: ""test"", -+ Namespace: ""test"", -+ Image: ""test"", -+ Stack: lokiv1.LokiStackSpec{ -+ Size: lokiv1.SizeOneXExtraSmall, -+ HashRing: &lokiv1.HashRingSpec{ -+ Type: lokiv1.HashRingMemberList, -+ MemberList: &lokiv1.MemberListSpec{ -+ InstanceAddrType: lokiv1.InstanceAddrPodIP, -+ }, -+ }, -+ Template: &lokiv1.LokiTemplateSpec{ -+ Compactor: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Distributor: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Ingester: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Querier: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ QueryFrontend: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Gateway: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ IndexGateway: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ Ruler: &lokiv1.LokiComponentSpec{ -+ Replicas: 1, -+ }, -+ }, -+ }, -+ } -+ -+ wantEnvVar := v1.EnvVar{ -+ Name: gossipInstanceAddrEnvVarName, -+ ValueFrom: &v1.EnvVarSource{ -+ FieldRef: &v1.ObjectFieldSelector{ -+ APIVersion: ""v1"", -+ FieldPath: ""status.podIP"", -+ }, -+ }, -+ } -+ -+ for _, cs := range lokiContainers(t, opt) { -+ for _, c := range cs { -+ require.Contains(t, c.Env, wantEnvVar, ""missing envVar %s for: %s"", gossipInstanceAddrEnvVarName, c.Name) -+ } -+ } -+} -diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go -index 62d70e3011373..4d0bc3a7e79ba 100644 ---- a/operator/internal/manifests/querier.go -+++ b/operator/internal/manifests/querier.go -@@ -42,6 +42,10 @@ func BuildQuerier(opts Options) ([]client.Object, error) { - } - } - -+ if err := configureHashRingEnv(&deployment.Spec.Template.Spec, opts); err != nil { -+ return nil, err -+ } -+ - if err := configureProxyEnv(&deployment.Spec.Template.Spec, opts); err != nil { - return nil, err - } -@@ -82,6 +86,7 @@ func NewQuerierDeployment(opts Options) *appsv1.Deployment { - ""-target=querier"", - fmt.Sprintf(""-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), - fmt.Sprintf(""-runtime-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), -+ ""-config.expand-env=true"", - }, - ReadinessProbe: lokiReadinessProbe(), - LivenessProbe: lokiLivenessProbe(), -diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go -index d8e3cfae2cc71..405da3daa6a32 100644 ---- a/operator/internal/manifests/query-frontend.go -+++ b/operator/internal/manifests/query-frontend.go -@@ -37,6 +37,10 @@ func BuildQueryFrontend(opts Options) ([]client.Object, error) { - } - } - -+ if err := 
configureHashRingEnv(&deployment.Spec.Template.Spec, opts); err != nil { -+ return nil, err -+ } -+ - if err := configureProxyEnv(&deployment.Spec.Template.Spec, opts); err != nil { - return nil, err - } -@@ -77,6 +81,7 @@ func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment { - ""-target=query-frontend"", - fmt.Sprintf(""-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), - fmt.Sprintf(""-runtime-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), -+ ""-config.expand-env=true"", - }, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ -diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go -index 8031c55a8334f..547ded9423f34 100644 ---- a/operator/internal/manifests/ruler.go -+++ b/operator/internal/manifests/ruler.go -@@ -49,6 +49,10 @@ func BuildRuler(opts Options) ([]client.Object, error) { - objs = configureRulerObjsForMode(opts) - } - -+ if err := configureHashRingEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { -+ return nil, err -+ } -+ - if err := configureProxyEnv(&statefulSet.Spec.Template.Spec, opts); err != nil { - return nil, err - } -@@ -101,6 +105,7 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet { - ""-target=ruler"", - fmt.Sprintf(""-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiConfigFileName)), - fmt.Sprintf(""-runtime-config.file=%s"", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)), -+ ""-config.expand-env=true"", - }, - ReadinessProbe: lokiReadinessProbe(), - LivenessProbe: lokiLivenessProbe(), -diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go -index a2c4ef10ab2d2..0652795cdf227 100644 ---- a/operator/internal/manifests/var.go -+++ b/operator/internal/manifests/var.go -@@ -19,6 +19,8 @@ const ( - grpcPort = 9095 - protocolTCP = ""TCP"" - -+ gossipInstanceAddrEnvVarName = ""HASH_RING_INSTANCE_ADDR"" -+ - lokiHTTPPortName = ""metrics"" - lokiInternalHTTPPortName = ""healthchecks"" - lokiGRPCPortName = ""grpclb""",operator,"Add support for memberlist bind network configuration (#8672) - -Co-authored-by: Robert Jacob " -c3e282e0308a012ed4b2039eda47ba27fd55f8ef,2021-11-04 14:07:48,Periklis Tsirakidis,Fix local kind deployments (#101),False,"diff --git a/bundle/manifests/loki-operator.clusterserviceversion.yaml b/bundle/manifests/loki-operator.clusterserviceversion.yaml -index bace648512a94..66f3f696a044c 100644 ---- a/bundle/manifests/loki-operator.clusterserviceversion.yaml -+++ b/bundle/manifests/loki-operator.clusterserviceversion.yaml -@@ -581,6 +581,7 @@ spec: - containers: - - args: - - --with-lokistack-gateway -+ - --with-lokistack-gateway-route - - --with-cert-signing-service - - --with-service-monitors - - --with-tls-service-monitors -diff --git a/config/overlays/development/kustomization.yaml b/config/overlays/development/kustomization.yaml -index d2d9b4d7979a7..327464bacc97c 100644 ---- a/config/overlays/development/kustomization.yaml -+++ b/config/overlays/development/kustomization.yaml -@@ -19,4 +19,4 @@ commonLabels: - - patchesStrategicMerge: - - manager_related_image_patch.yaml --- manager_run_flags_patch.yaml -+- manager_image_pull_policy_patch.yaml -diff --git a/config/overlays/development/manager_run_flags_patch.yaml b/config/overlays/development/manager_image_pull_policy_patch.yaml -similarity index 72% -rename from config/overlays/development/manager_run_flags_patch.yaml -rename to 
config/overlays/development/manager_image_pull_policy_patch.yaml -index 8c9ce6420f9d7..5e9ea970bca2b 100644 ---- a/config/overlays/development/manager_run_flags_patch.yaml -+++ b/config/overlays/development/manager_image_pull_policy_patch.yaml -@@ -7,5 +7,4 @@ spec: - spec: - containers: - - name: manager -- args: -- - ""--with-lokistack-gateway"" -+ imagePullPolicy: Always -diff --git a/config/overlays/openshift/manager_run_flags_patch.yaml b/config/overlays/openshift/manager_run_flags_patch.yaml -index bd3f85f6669c0..23e9034d38749 100644 ---- a/config/overlays/openshift/manager_run_flags_patch.yaml -+++ b/config/overlays/openshift/manager_run_flags_patch.yaml -@@ -9,6 +9,7 @@ spec: - - name: manager - args: - - ""--with-lokistack-gateway"" -+ - ""--with-lokistack-gateway-route"" - - ""--with-cert-signing-service"" - - ""--with-service-monitors"" - - ""--with-tls-service-monitors"" -diff --git a/controllers/lokistack_controller.go b/controllers/lokistack_controller.go -index 742beb27b0117..bb944d3fac96e 100644 ---- a/controllers/lokistack_controller.go -+++ b/controllers/lokistack_controller.go -@@ -145,7 +145,7 @@ func (r *LokiStackReconciler) SetupWithManager(mgr manager.Manager) error { - } - - func (r *LokiStackReconciler) buildController(bld k8s.Builder) error { -- return bld. -+ bld = bld. - For(&lokiv1beta1.LokiStack{}, createOrUpdateOnlyPred). - Owns(&corev1.ConfigMap{}, updateOrDeleteOnlyPred). - Owns(&corev1.ServiceAccount{}, updateOrDeleteOnlyPred). -@@ -153,8 +153,13 @@ func (r *LokiStackReconciler) buildController(bld k8s.Builder) error { - Owns(&appsv1.Deployment{}, updateOrDeleteOnlyPred). - Owns(&appsv1.StatefulSet{}, updateOrDeleteOnlyPred). - Owns(&rbacv1.ClusterRole{}, updateOrDeleteOnlyPred). -- Owns(&rbacv1.ClusterRoleBinding{}, updateOrDeleteOnlyPred). -- Owns(&networkingv1.Ingress{}, updateOrDeleteOnlyPred). -- Owns(&routev1.Route{}, updateOrDeleteOnlyPred). 
-- Complete(r) -+ Owns(&rbacv1.ClusterRoleBinding{}, updateOrDeleteOnlyPred) -+ -+ if r.Flags.EnableGatewayRoute { -+ bld = bld.Owns(&routev1.Route{}, updateOrDeleteOnlyPred) -+ } else { -+ bld = bld.Owns(&networkingv1.Ingress{}, updateOrDeleteOnlyPred) -+ } -+ -+ return bld.Complete(r) - } -diff --git a/controllers/lokistack_controller_test.go b/controllers/lokistack_controller_test.go -index 7f26158ceb283..1d279ba3696b8 100644 ---- a/controllers/lokistack_controller_test.go -+++ b/controllers/lokistack_controller_test.go -@@ -9,6 +9,7 @@ import ( - ""github.com/ViaQ/logerr/log"" - lokiv1beta1 ""github.com/ViaQ/loki-operator/api/v1beta1"" - ""github.com/ViaQ/loki-operator/internal/external/k8s/k8sfakes"" -+ ""github.com/ViaQ/loki-operator/internal/manifests"" - routev1 ""github.com/openshift/api/route/v1"" - ""github.com/stretchr/testify/require"" - -@@ -70,65 +71,82 @@ func TestLokiStackController_RegistersCustomResourceForCreateOrUpdate(t *testing - } - - func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *testing.T) { -- b := &k8sfakes.FakeBuilder{} - k := &k8sfakes.FakeClient{} -- c := &LokiStackReconciler{Client: k, Scheme: scheme} -- -- b.ForReturns(b) -- b.OwnsReturns(b) -- -- err := c.buildController(b) -- require.NoError(t, err) -- -- // Require Owns-Calls for all owned resources -- require.Equal(t, 9, b.OwnsCallCount()) - - // Require owned resources - type test struct { -- obj client.Object -- pred builder.OwnsOption -+ obj client.Object -+ index int -+ flags manifests.FeatureFlags -+ pred builder.OwnsOption - } - table := []test{ - { -- obj: &corev1.ConfigMap{}, -- pred: updateOrDeleteOnlyPred, -+ obj: &corev1.ConfigMap{}, -+ index: 0, -+ pred: updateOrDeleteOnlyPred, - }, - { -- obj: &corev1.ServiceAccount{}, -- pred: updateOrDeleteOnlyPred, -+ obj: &corev1.ServiceAccount{}, -+ index: 1, -+ pred: updateOrDeleteOnlyPred, - }, - { -- obj: &corev1.Service{}, -- pred: updateOrDeleteOnlyPred, -+ obj: &corev1.Service{}, -+ index: 2, -+ pred: updateOrDeleteOnlyPred, - }, - { -- obj: &appsv1.Deployment{}, -- pred: updateOrDeleteOnlyPred, -+ obj: &appsv1.Deployment{}, -+ index: 3, -+ pred: updateOrDeleteOnlyPred, - }, - { -- obj: &appsv1.StatefulSet{}, -- pred: updateOrDeleteOnlyPred, -+ obj: &appsv1.StatefulSet{}, -+ index: 4, -+ pred: updateOrDeleteOnlyPred, - }, - { -- obj: &rbacv1.ClusterRole{}, -- pred: updateOrDeleteOnlyPred, -+ obj: &rbacv1.ClusterRole{}, -+ index: 5, -+ pred: updateOrDeleteOnlyPred, - }, - { -- obj: &rbacv1.ClusterRoleBinding{}, -- pred: updateOrDeleteOnlyPred, -+ obj: &rbacv1.ClusterRoleBinding{}, -+ index: 6, -+ pred: updateOrDeleteOnlyPred, - }, - { -- obj: &networkingv1.Ingress{}, -+ obj: &networkingv1.Ingress{}, -+ index: 7, -+ flags: manifests.FeatureFlags{ -+ EnableGatewayRoute: false, -+ }, - pred: updateOrDeleteOnlyPred, - }, - { -- obj: &routev1.Route{}, -+ obj: &routev1.Route{}, -+ index: 7, -+ flags: manifests.FeatureFlags{ -+ EnableGatewayRoute: true, -+ }, - pred: updateOrDeleteOnlyPred, - }, - } -- for i, tst := range table { -+ for _, tst := range table { -+ b := &k8sfakes.FakeBuilder{} -+ b.ForReturns(b) -+ b.OwnsReturns(b) -+ -+ c := &LokiStackReconciler{Client: k, Scheme: scheme, Flags: tst.flags} -+ err := c.buildController(b) -+ require.NoError(t, err) -+ -+ // Require Owns-Calls for all owned resources -+ require.Equal(t, 8, b.OwnsCallCount()) -+ - // Require Owns-call options to have delete predicate only -- obj, opts := b.OwnsArgsForCall(i) -+ obj, opts := b.OwnsArgsForCall(tst.index) - require.Equal(t, tst.obj, 
obj) - require.Equal(t, tst.pred, opts[0]) - } -diff --git a/hack/lokistack_dev.yaml b/hack/lokistack_dev.yaml -index 2db4e9e10d891..8a7c906b1ab01 100644 ---- a/hack/lokistack_dev.yaml -+++ b/hack/lokistack_dev.yaml -@@ -8,4 +8,4 @@ spec: - storage: - secret: - name: test -- storageClassName: gp2 -+ storageClassName: standard -diff --git a/internal/manifests/gateway.go b/internal/manifests/gateway.go -index ad93703a75646..4af8298bf710e 100644 ---- a/internal/manifests/gateway.go -+++ b/internal/manifests/gateway.go -@@ -66,7 +66,6 @@ func BuildGateway(opts Options) ([]client.Object, error) { - // NewGatewayDeployment creates a deployment object for a lokiStack-gateway - func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment { - podSpec := corev1.PodSpec{ -- ServiceAccountName: GatewayName(opts.Name), - Volumes: []corev1.Volume{ - { - Name: ""rbac"", -diff --git a/internal/manifests/gateway_tenants.go b/internal/manifests/gateway_tenants.go -index 29a96d5d7db1b..6fe5bdbe14a6d 100644 ---- a/internal/manifests/gateway_tenants.go -+++ b/internal/manifests/gateway_tenants.go -@@ -18,6 +18,10 @@ import ( - // tenant mode. Currently nothing is applied for modes static and dynamic. For mode openshift-logging - // the tenant spec is filled with defaults for authentication and authorization. - func ApplyGatewayDefaultOptions(opts *Options) error { -+ if opts.Stack.Tenants == nil { -+ return nil -+ } -+ - switch opts.Stack.Tenants.Mode { - case lokiv1beta1.Static, lokiv1beta1.Dynamic: - return nil // continue using user input -@@ -37,6 +41,7 @@ func ApplyGatewayDefaultOptions(opts *Options) error { - if err := mergo.Merge(&opts.OpenShiftOptions, &defaults, mergo.WithOverride); err != nil { - return kverrors.Wrap(err, ""failed to merge defaults for mode openshift logging"") - } -+ - } - - return nil -diff --git a/internal/manifests/gateway_tenants_test.go b/internal/manifests/gateway_tenants_test.go -index 5de87474bf48d..0f7252d0a9dd8 100644 ---- a/internal/manifests/gateway_tenants_test.go -+++ b/internal/manifests/gateway_tenants_test.go -@@ -400,6 +400,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { - Spec: appsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ -+ ServiceAccountName: ""gateway"", - Containers: []corev1.Container{ - { - Name: gatewayContainerName, -diff --git a/internal/manifests/openshift/configure.go b/internal/manifests/openshift/configure.go -index 78d1ebd448447..dec2f139340c7 100644 ---- a/internal/manifests/openshift/configure.go -+++ b/internal/manifests/openshift/configure.go -@@ -92,6 +92,7 @@ func ConfigureGatewayDeployment( - gwContainer.Args = gwArgs - - p := corev1.PodSpec{ -+ ServiceAccountName: d.GetName(), - Containers: []corev1.Container{ - *gwContainer, - newOPAOpenShiftContainer(sercretVolumeName, tlsDir, certFile, keyFile, withTLS), -diff --git a/internal/manifests/options.go b/internal/manifests/options.go -index 2d0564d0c4cf1..aa866c2bf3e52 100644 ---- a/internal/manifests/options.go -+++ b/internal/manifests/options.go -@@ -42,6 +42,7 @@ type FeatureFlags struct { - EnableServiceMonitors bool - EnableTLSServiceMonitorConfig bool - EnableGateway bool -+ EnableGatewayRoute bool - } - - // TenantSecrets for clientID, clientSecret and issuerCAPath for tenant's authentication. 
-diff --git a/main.go b/main.go -index e4fd5b84b48d7..b8309b19cc55d 100644 ---- a/main.go -+++ b/main.go -@@ -61,6 +61,7 @@ func main() { - enableServiceMonitors bool - enableTLSServiceMonitors bool - enableGateway bool -+ enableGatewayRoute bool - ) - - flag.StringVar(&metricsAddr, ""metrics-bind-address"", "":8080"", ""The address the metric endpoint binds to."") -@@ -75,6 +76,8 @@ func main() { - ""Enables loading of a prometheus service monitor."") - flag.BoolVar(&enableGateway, ""with-lokistack-gateway"", false, - ""Enables the manifest creation for the entire lokistack-gateway."") -+ flag.BoolVar(&enableGatewayRoute, ""with-lokistack-gateway-route"", false, -+ ""Enables the usage of Route for the lokistack-gateway instead of Ingress (OCP Only!)"") - flag.Parse() - - log.Init(""loki-operator"") -@@ -86,7 +89,10 @@ func main() { - - if enableGateway { - utilruntime.Must(configv1.AddToScheme(scheme)) -- utilruntime.Must(routev1.AddToScheme(scheme)) -+ -+ if enableGatewayRoute { -+ utilruntime.Must(routev1.AddToScheme(scheme)) -+ } - } - - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ -@@ -107,6 +113,7 @@ func main() { - EnableServiceMonitors: enableServiceMonitors, - EnableTLSServiceMonitorConfig: enableTLSServiceMonitors, - EnableGateway: enableGateway, -+ EnableGatewayRoute: enableGatewayRoute, - } - - if err = (&controllers.LokiStackReconciler{",unknown,Fix local kind deployments (#101) -49064aed64b99d57980866269888b05203256527,2025-03-06 02:26:43,Marshall Ford,"docs: remove deployment.environment from list of otel resource attrs stored as labels (#16427) - -Signed-off-by: Marshall Ford -Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/send-data/otel/_index.md b/docs/sources/send-data/otel/_index.md -index ba082c30f0699..98145810b56cf 100644 ---- a/docs/sources/send-data/otel/_index.md -+++ b/docs/sources/send-data/otel/_index.md -@@ -78,7 +78,6 @@ Since the OpenTelemetry protocol differs from the Loki storage model, here is ho - - cloud.availability_zone - - cloud.region - - container.name -- - deployment.environment - - deployment.environment.name - - k8s.cluster.name - - k8s.container.name",docs,"remove deployment.environment from list of otel resource attrs stored as labels (#16427) - -Signed-off-by: Marshall Ford -Co-authored-by: J Stickler " -4451d56d6b9a9d2eb54ed75d3d2c8fe0db6908eb,2024-07-17 12:26:26,Cyril Tovena,fix: Fixes span name of serializeRounTripper (#13541),False,"diff --git a/pkg/querier/queryrange/serialize.go b/pkg/querier/queryrange/serialize.go -index d7a5eb125d30c..6f292a229f7eb 100644 ---- a/pkg/querier/queryrange/serialize.go -+++ b/pkg/querier/queryrange/serialize.go -@@ -25,7 +25,7 @@ func NewSerializeRoundTripper(next queryrangebase.Handler, codec queryrangebase. - - func (rt *serializeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - ctx := r.Context() -- sp, ctx := opentracing.StartSpanFromContext(ctx, ""limitedRoundTripper.do"") -+ sp, ctx := opentracing.StartSpanFromContext(ctx, ""serializeRoundTripper.do"") - defer sp.Finish() - - request, err := rt.codec.DecodeRequest(ctx, r, nil)",fix,Fixes span name of serializeRounTripper (#13541) -d32f3f7b68e77e3268a61d7bce152bafc54fb2b4,2022-11-07 16:00:17,Danny Kopping,"Querier: remove expensive & low-value debug log line (#7532) - -Removes a low-value debug log line that, in one recent example, can logs -tens of thousands of times in a short span of time. 
- -Signed-off-by: Danny Kopping ",False,"diff --git a/pkg/storage/stores/series/index/caching_index_client.go b/pkg/storage/stores/series/index/caching_index_client.go -index 79461659995ee..49760fb3d456b 100644 ---- a/pkg/storage/stores/series/index/caching_index_client.go -+++ b/pkg/storage/stores/series/index/caching_index_client.go -@@ -296,7 +296,6 @@ func isChunksQuery(q Query) bool { - } - - func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batches []ReadBatch) error { -- logger := util_log.WithContext(ctx, s.logger) - cachePuts.Add(float64(len(keys))) - - // We're doing the hashing to handle unicode and key len properly. -@@ -304,9 +303,6 @@ func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batc - hashed := make([]string, 0, len(keys)) - bufs := make([][]byte, 0, len(batches)) - for i := range keys { -- if len(batches[i].Entries) != 0 { -- level.Debug(logger).Log(""msg"", ""caching index entries"", ""key"", keys[i], ""count"", len(batches[i].Entries)) -- } - hashed = append(hashed, cache.HashKey(keys[i])) - out, err := proto.Marshal(&batches[i]) - if err != nil {",Querier,"remove expensive & low-value debug log line (#7532) - -Removes a low-value debug log line that, in one recent example, can logs -tens of thousands of times in a short span of time. - -Signed-off-by: Danny Kopping " -cc941fe42a93fb6f6233f5e67d9cc524d12737e1,2024-03-28 15:17:00,Anton Kolesnikov,chore: refactor line filter MatchType (#12388),False,"diff --git a/pkg/loghttp/params.go b/pkg/loghttp/params.go -index 654c52e7725df..74597a1970d4f 100644 ---- a/pkg/loghttp/params.go -+++ b/pkg/loghttp/params.go -@@ -11,9 +11,9 @@ import ( - ""github.com/c2h5oh/datasize"" - ""github.com/pkg/errors"" - ""github.com/prometheus/common/model"" -- ""github.com/prometheus/prometheus/model/labels"" - - ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logql/log"" - ""github.com/grafana/loki/pkg/logql/syntax"" - ) - -@@ -193,7 +193,7 @@ func parseRegexQuery(httpRequest *http.Request) (string, error) { - if err != nil { - return """", err - } -- newExpr, err := syntax.AddFilterExpr(expr, labels.MatchRegexp, """", regexp) -+ newExpr, err := syntax.AddFilterExpr(expr, log.LineMatchRegexp, """", regexp) - if err != nil { - return """", err - } -diff --git a/pkg/logql/log/filter.go b/pkg/logql/log/filter.go -index 7b613947c8b8b..164741f4c8c96 100644 ---- a/pkg/logql/log/filter.go -+++ b/pkg/logql/log/filter.go -@@ -14,6 +14,32 @@ import ( - ""github.com/grafana/loki/pkg/util"" - ) - -+// LineMatchType is an enum for line matching types. -+type LineMatchType int -+ -+// Possible LineMatchTypes. -+const ( -+ LineMatchEqual LineMatchType = iota -+ LineMatchNotEqual -+ LineMatchRegexp -+ LineMatchNotRegexp -+) -+ -+func (t LineMatchType) String() string { -+ switch t { -+ case LineMatchEqual: -+ return ""|="" -+ case LineMatchNotEqual: -+ return ""!="" -+ case LineMatchRegexp: -+ return ""|~"" -+ case LineMatchNotRegexp: -+ return ""!~"" -+ default: -+ return """" -+ } -+} -+ - // Checker is an interface that matches against the input line or regexp. - type Checker interface { - Test(line []byte, caseInsensitive bool, equal bool) bool -@@ -517,15 +543,15 @@ func (f containsAllFilter) Matches(test Checker) bool { - } - - // NewFilter creates a new line filter from a match string and type. 
--func NewFilter(match string, mt labels.MatchType) (Filterer, error) { -+func NewFilter(match string, mt LineMatchType) (Filterer, error) { - switch mt { -- case labels.MatchRegexp: -+ case LineMatchRegexp: - return parseRegexpFilter(match, true, false) -- case labels.MatchNotRegexp: -+ case LineMatchNotRegexp: - return parseRegexpFilter(match, false, false) -- case labels.MatchEqual: -+ case LineMatchEqual: - return newContainsFilter([]byte(match), false), nil -- case labels.MatchNotEqual: -+ case LineMatchNotEqual: - return NewNotFilter(newContainsFilter([]byte(match), false)), nil - default: - return nil, fmt.Errorf(""unknown matcher: %v"", match) -diff --git a/pkg/logql/log/ip.go b/pkg/logql/log/ip.go -index 1508432d245c5..851cc1a9fa6c7 100644 ---- a/pkg/logql/log/ip.go -+++ b/pkg/logql/log/ip.go -@@ -6,7 +6,6 @@ import ( - ""net/netip"" - ""unicode"" - -- ""github.com/prometheus/prometheus/model/labels"" - ""go4.org/netipx"" - ) - -@@ -27,14 +26,14 @@ type IPMatcher interface{} - - type IPLineFilter struct { - ip *ipFilter -- ty labels.MatchType -+ ty LineMatchType - } - - // NewIPLineFilter is used to construct ip filter as a `LineFilter` --func NewIPLineFilter(pattern string, ty labels.MatchType) (*IPLineFilter, error) { -+func NewIPLineFilter(pattern string, ty LineMatchType) (*IPLineFilter, error) { - // check if `ty` supported in ip matcher. - switch ty { -- case labels.MatchEqual, labels.MatchNotEqual: -+ case LineMatchEqual, LineMatchNotEqual: - default: - return nil, ErrIPFilterInvalidOperation - } -@@ -69,8 +68,8 @@ func (f *IPLineFilter) RequiredLabelNames() []string { - return []string{} // empty for line filter - } - --func (f *IPLineFilter) filterTy(line []byte, ty labels.MatchType) bool { -- if ty == labels.MatchNotEqual { -+func (f *IPLineFilter) filterTy(line []byte, ty LineMatchType) bool { -+ if ty == LineMatchNotEqual { - return !f.ip.filter(line) - } - return f.ip.filter(line) -diff --git a/pkg/logql/log/ip_test.go b/pkg/logql/log/ip_test.go -index 105b3badd58f0..32b98169f7a60 100644 ---- a/pkg/logql/log/ip_test.go -+++ b/pkg/logql/log/ip_test.go -@@ -189,7 +189,7 @@ func Test_IPLineFilterTy(t *testing.T) { - cases := []struct { - name string - pat string -- ty labels.MatchType -+ ty LineMatchType - line []byte - expectedMatch bool - -@@ -199,21 +199,21 @@ func Test_IPLineFilterTy(t *testing.T) { - { - name: ""equal operator"", - pat: ""192.168.0.1"", -- ty: labels.MatchEqual, -+ ty: LineMatchEqual, - line: []byte(""192.168.0.1""), - expectedMatch: true, - }, - { - name: ""not equal operator"", - pat: ""192.168.0.2"", -- ty: labels.MatchNotEqual, -+ ty: LineMatchNotEqual, - line: []byte(""192.168.0.1""), // match because !=ip(""192.168.0.2"") - expectedMatch: true, - }, - { - name: ""regex not equal"", - pat: ""192.168.0.2"", -- ty: labels.MatchNotRegexp, // not supported -+ ty: LineMatchNotRegexp, // not supported - line: []byte(""192.168.0.1""), - fail: true, - err: ErrIPFilterInvalidOperation, -@@ -221,7 +221,7 @@ func Test_IPLineFilterTy(t *testing.T) { - { - name: ""regex equal"", - pat: ""192.168.0.2"", -- ty: labels.MatchRegexp, // not supported -+ ty: LineMatchRegexp, // not supported - line: []byte(""192.168.0.1""), - fail: true, - err: ErrIPFilterInvalidOperation, -diff --git a/pkg/logql/log/metrics_extraction_test.go b/pkg/logql/log/metrics_extraction_test.go -index f059271cb8c65..9cc5ff4411f51 100644 ---- a/pkg/logql/log/metrics_extraction_test.go -+++ b/pkg/logql/log/metrics_extraction_test.go -@@ -346,7 +346,7 @@ func TestNewLineSampleExtractor(t 
*testing.T) { - require.Equal(t, 1., f) - assertLabelResult(t, lbs, l) - -- stage := mustFilter(NewFilter(""foo"", labels.MatchEqual)).ToStage() -+ stage := mustFilter(NewFilter(""foo"", LineMatchEqual)).ToStage() - se, err = NewLineSampleExtractor(BytesExtractor, []Stage{stage}, []string{""namespace""}, false, false) - require.NoError(t, err) - -@@ -404,7 +404,7 @@ func TestNewLineSampleExtractorWithStructuredMetadata(t *testing.T) { - se, err = NewLineSampleExtractor(BytesExtractor, []Stage{ - NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")), - NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, ""user"", ""bob"")), -- mustFilter(NewFilter(""foo"", labels.MatchEqual)).ToStage(), -+ mustFilter(NewFilter(""foo"", LineMatchEqual)).ToStage(), - }, []string{""foo""}, false, false) - require.NoError(t, err) - -diff --git a/pkg/logql/log/pipeline_test.go b/pkg/logql/log/pipeline_test.go -index 12a1a61fcc2cf..9b2aff1332d47 100644 ---- a/pkg/logql/log/pipeline_test.go -+++ b/pkg/logql/log/pipeline_test.go -@@ -240,7 +240,7 @@ func newPipelineFilter(start, end int64, lbls, structuredMetadata labels.Labels, - stages = append(stages, s) - }) - -- stages = append(stages, mustFilter(NewFilter(filter, labels.MatchEqual)).ToStage()) -+ stages = append(stages, mustFilter(NewFilter(filter, LineMatchEqual)).ToStage()) - - return PipelineFilter{start, end, matchers, NewPipeline(stages)} - } -@@ -527,7 +527,7 @@ func Benchmark_Pipeline(b *testing.B) { - b.ReportAllocs() - - stages := []Stage{ -- mustFilter(NewFilter(""metrics.go"", labels.MatchEqual)).ToStage(), -+ mustFilter(NewFilter(""metrics.go"", LineMatchEqual)).ToStage(), - NewLogfmtParser(false, false), - NewAndLabelFilter( - NewDurationLabelFilter(LabelFilterGreaterThan, ""duration"", 10*time.Millisecond), -@@ -611,7 +611,7 @@ func jsonBenchmark(b *testing.B, parser Stage) { - b.ReportAllocs() - - p := NewPipeline([]Stage{ -- mustFilter(NewFilter(""metrics.go"", labels.MatchEqual)).ToStage(), -+ mustFilter(NewFilter(""metrics.go"", LineMatchEqual)).ToStage(), - parser, - }) - line := []byte(`{""ts"":""2020-12-27T09:15:54.333026285Z"",""error"":""action could not be completed"", ""context"":{""file"": ""metrics.go""}}`) -@@ -643,7 +643,7 @@ func invalidJSONBenchmark(b *testing.B, parser Stage) { - b.ReportAllocs() - - p := NewPipeline([]Stage{ -- mustFilter(NewFilter(""invalid json"", labels.MatchEqual)).ToStage(), -+ mustFilter(NewFilter(""invalid json"", LineMatchEqual)).ToStage(), - parser, - }) - line := []byte(`invalid json`) -@@ -696,7 +696,7 @@ func logfmtBenchmark(b *testing.B, parser Stage) { - b.ReportAllocs() - - p := NewPipeline([]Stage{ -- mustFilter(NewFilter(""ts"", labels.MatchEqual)).ToStage(), -+ mustFilter(NewFilter(""ts"", LineMatchEqual)).ToStage(), - parser, - }) - -diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go -index 472bc51806041..c6b8e9c4b34cc 100644 ---- a/pkg/logql/shardmapper_test.go -+++ b/pkg/logql/shardmapper_test.go -@@ -7,6 +7,7 @@ import ( - ""github.com/prometheus/prometheus/model/labels"" - ""github.com/stretchr/testify/require"" - -+ ""github.com/grafana/loki/pkg/logql/log"" - ""github.com/grafana/loki/pkg/logql/syntax"" - ""github.com/grafana/loki/pkg/logqlmodel"" - ""github.com/grafana/loki/pkg/querier/astmapper"" -@@ -529,7 +530,7 @@ func TestMapping(t *testing.T) { - MultiStages: syntax.MultiStageExpr{ - &syntax.LineFilterExpr{ - LineFilter: syntax.LineFilter{ -- Ty: labels.MatchEqual, -+ Ty: log.LineMatchEqual, - Match: ""error"", - Op: 
"""", - }, -@@ -550,7 +551,7 @@ func TestMapping(t *testing.T) { - MultiStages: syntax.MultiStageExpr{ - &syntax.LineFilterExpr{ - LineFilter: syntax.LineFilter{ -- Ty: labels.MatchEqual, -+ Ty: log.LineMatchEqual, - Match: ""error"", - Op: """", - }, -diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go -index 060fc3cd11711..e1b796c4fbedd 100644 ---- a/pkg/logql/syntax/ast.go -+++ b/pkg/logql/syntax/ast.go -@@ -329,7 +329,7 @@ func (e *PipelineExpr) HasFilter() bool { - } - - type LineFilter struct { -- Ty labels.MatchType -+ Ty log.LineMatchType - Match string - Op string - } -@@ -342,7 +342,7 @@ type LineFilterExpr struct { - implicit - } - --func newLineFilterExpr(ty labels.MatchType, op, match string) *LineFilterExpr { -+func newLineFilterExpr(ty log.LineMatchType, op, match string) *LineFilterExpr { - return &LineFilterExpr{ - LineFilter: LineFilter{ - Ty: ty, -@@ -355,7 +355,7 @@ func newLineFilterExpr(ty labels.MatchType, op, match string) *LineFilterExpr { - func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr { - right.Ty = left.Ty - -- if left.Ty == labels.MatchEqual || left.Ty == labels.MatchRegexp { -+ if left.Ty == log.LineMatchEqual || left.Ty == log.LineMatchRegexp { - left.Or = right - right.IsOrChild = true - return left -@@ -389,7 +389,7 @@ func (e *LineFilterExpr) Accept(v RootVisitor) { - } - - // AddFilterExpr adds a filter expression to a logselector expression. --func AddFilterExpr(expr LogSelectorExpr, ty labels.MatchType, op, match string) (LogSelectorExpr, error) { -+func AddFilterExpr(expr LogSelectorExpr, ty log.LineMatchType, op, match string) (LogSelectorExpr, error) { - filter := newLineFilterExpr(ty, op, match) - switch e := expr.(type) { - case *MatchersExpr: -@@ -412,16 +412,7 @@ func (e *LineFilterExpr) String() string { - } - - if !e.IsOrChild { // Only write the type when we're not chaining ""or"" filters -- switch e.Ty { -- case labels.MatchRegexp: -- sb.WriteString(""|~"") -- case labels.MatchNotRegexp: -- sb.WriteString(""!~"") -- case labels.MatchEqual: -- sb.WriteString(""|="") -- case labels.MatchNotEqual: -- sb.WriteString(""!="") -- } -+ sb.WriteString(e.Ty.String()) - sb.WriteString("" "") - } - -diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go -index ece470516eb45..95f654d2c647f 100644 ---- a/pkg/logql/syntax/ast_test.go -+++ b/pkg/logql/syntax/ast_test.go -@@ -449,16 +449,16 @@ func Test_FilterMatcher(t *testing.T) { - - func TestOrLineFilterTypes(t *testing.T) { - for _, tt := range []struct { -- ty labels.MatchType -+ ty log.LineMatchType - }{ -- {labels.MatchEqual}, -- {labels.MatchNotEqual}, -- {labels.MatchRegexp}, -- {labels.MatchNotRegexp}, -+ {log.LineMatchEqual}, -+ {log.LineMatchNotEqual}, -+ {log.LineMatchRegexp}, -+ {log.LineMatchNotRegexp}, - } { - t.Run(""right inherits left's type"", func(t *testing.T) { - left := &LineFilterExpr{LineFilter: LineFilter{Ty: tt.ty, Match: ""something""}} -- right := &LineFilterExpr{LineFilter: LineFilter{Ty: labels.MatchEqual, Match: ""something""}} -+ right := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: ""something""}} - - _ = newOrLineFilter(left, right) - require.Equal(t, tt.ty, right.Ty) -diff --git a/pkg/logql/syntax/expr.y b/pkg/logql/syntax/expr.y -index 7e801480f4808..043642d526ad1 100644 ---- a/pkg/logql/syntax/expr.y -+++ b/pkg/logql/syntax/expr.y -@@ -11,7 +11,7 @@ import ( - - %union{ - Expr Expr -- Filter labels.MatchType -+ Filter log.LineMatchType - Grouping *Grouping - Labels []string - LogExpr 
LogSelectorExpr -@@ -239,10 +239,10 @@ labelReplaceExpr: - ; - - filter: -- PIPE_MATCH { $$ = labels.MatchRegexp } -- | PIPE_EXACT { $$ = labels.MatchEqual } -- | NRE { $$ = labels.MatchNotRegexp } -- | NEQ { $$ = labels.MatchNotEqual } -+ PIPE_MATCH { $$ = log.LineMatchRegexp } -+ | PIPE_EXACT { $$ = log.LineMatchEqual } -+ | NRE { $$ = log.LineMatchNotRegexp } -+ | NEQ { $$ = log.LineMatchNotEqual } - ; - - selector: -@@ -287,9 +287,9 @@ filterOp: - ; - - orFilter: -- STRING { $$ = newLineFilterExpr(labels.MatchEqual, """", $1) } -- | filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr(labels.MatchEqual, $1, $3) } -- | STRING OR orFilter { $$ = newOrLineFilter(newLineFilterExpr(labels.MatchEqual, """", $1), $3) } -+ STRING { $$ = newLineFilterExpr(log.LineMatchEqual, """", $1) } -+ | filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr(log.LineMatchEqual, $1, $3) } -+ | STRING OR orFilter { $$ = newOrLineFilter(newLineFilterExpr(log.LineMatchEqual, """", $1), $3) } - ; - - lineFilter: -diff --git a/pkg/logql/syntax/expr.y.go b/pkg/logql/syntax/expr.y.go -index 1f38ab579f10b..41da7466fbc20 100644 ---- a/pkg/logql/syntax/expr.y.go -+++ b/pkg/logql/syntax/expr.y.go -@@ -4,7 +4,6 @@ package syntax - - import __yyfmt__ ""fmt"" - -- - import ( - ""github.com/grafana/loki/pkg/logql/log"" - ""github.com/prometheus/prometheus/model/labels"" -@@ -14,7 +13,7 @@ import ( - type exprSymType struct { - yys int - Expr Expr -- Filter labels.MatchType -+ Filter log.LineMatchType - Grouping *Grouping - Labels []string - LogExpr LogSelectorExpr -@@ -266,7 +265,6 @@ const exprEofCode = 1 - const exprErrCode = 2 - const exprInitialStackSize = 16 - -- - var exprExca = [...]int{ - -1, 1, - 1, -1, -@@ -554,7 +552,6 @@ var exprErrorMessages = [...]struct { - msg string - }{} - -- - /* parser for yacc output */ - - var ( -@@ -1162,22 +1159,22 @@ exprdefault: - case 57: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.Filter = labels.MatchRegexp -+ exprVAL.Filter = log.LineMatchRegexp - } - case 58: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.Filter = labels.MatchEqual -+ exprVAL.Filter = log.LineMatchEqual - } - case 59: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.Filter = labels.MatchNotRegexp -+ exprVAL.Filter = log.LineMatchNotRegexp - } - case 60: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.Filter = labels.MatchNotEqual -+ exprVAL.Filter = log.LineMatchNotEqual - } - case 61: - exprDollar = exprS[exprpt-3 : exprpt+1] -@@ -1296,17 +1293,17 @@ exprdefault: - case 84: - exprDollar = exprS[exprpt-1 : exprpt+1] - { -- exprVAL.OrFilter = newLineFilterExpr(labels.MatchEqual, """", exprDollar[1].str) -+ exprVAL.OrFilter = newLineFilterExpr(log.LineMatchEqual, """", exprDollar[1].str) - } - case 85: - exprDollar = exprS[exprpt-4 : exprpt+1] - { -- exprVAL.OrFilter = newLineFilterExpr(labels.MatchEqual, exprDollar[1].FilterOp, exprDollar[3].str) -+ exprVAL.OrFilter = newLineFilterExpr(log.LineMatchEqual, exprDollar[1].FilterOp, exprDollar[3].str) - } - case 86: - exprDollar = exprS[exprpt-3 : exprpt+1] - { -- exprVAL.OrFilter = newOrLineFilter(newLineFilterExpr(labels.MatchEqual, """", exprDollar[1].str), exprDollar[3].OrFilter) -+ exprVAL.OrFilter = newOrLineFilter(newLineFilterExpr(log.LineMatchEqual, """", exprDollar[1].str), exprDollar[3].OrFilter) - } - case 87: - exprDollar = exprS[exprpt-2 : exprpt+1] -diff --git a/pkg/logql/syntax/linefilter.go b/pkg/logql/syntax/linefilter.go -index f85b210234139..9b07e95deb12d 100644 
---- a/pkg/logql/syntax/linefilter.go -+++ b/pkg/logql/syntax/linefilter.go -@@ -1,8 +1,7 @@ - package syntax - - import ( -- ""github.com/prometheus/prometheus/model/labels"" -- -+ ""github.com/grafana/loki/pkg/logql/log"" - ""github.com/grafana/loki/pkg/util/encoding"" - ) - -@@ -40,7 +39,7 @@ func (lf LineFilter) MarshalTo(b []byte) (int, error) { - - func (lf *LineFilter) Unmarshal(b []byte) error { - buf := encoding.DecWith(b) -- lf.Ty = labels.MatchType(buf.Uvarint()) -+ lf.Ty = log.LineMatchType(buf.Uvarint()) - lf.Match = buf.UvarintStr() - lf.Op = buf.UvarintStr() - return nil -diff --git a/pkg/logql/syntax/linefilter_test.go b/pkg/logql/syntax/linefilter_test.go -index 6ce5a601c2815..d0cc700ce4601 100644 ---- a/pkg/logql/syntax/linefilter_test.go -+++ b/pkg/logql/syntax/linefilter_test.go -@@ -4,18 +4,19 @@ import ( - ""fmt"" - ""testing"" - -- ""github.com/prometheus/prometheus/model/labels"" - ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/logql/log"" - ) - - func TestLineFilterSerialization(t *testing.T) { - for i, orig := range []LineFilter{ - {}, -- {Ty: labels.MatchEqual, Match: ""match""}, -- {Ty: labels.MatchEqual, Match: ""match"", Op: ""OR""}, -- {Ty: labels.MatchNotEqual, Match: ""not match""}, -- {Ty: labels.MatchNotEqual, Match: ""not match"", Op: ""OR""}, -- {Ty: labels.MatchRegexp, Op: ""OR""}, -+ {Ty: log.LineMatchEqual, Match: ""match""}, -+ {Ty: log.LineMatchEqual, Match: ""match"", Op: ""OR""}, -+ {Ty: log.LineMatchNotEqual, Match: ""not match""}, -+ {Ty: log.LineMatchNotEqual, Match: ""not match"", Op: ""OR""}, -+ {Ty: log.LineMatchRegexp, Op: ""OR""}, - } { - t.Run(fmt.Sprintf(""%d"", i), func(t *testing.T) { - b := make([]byte, orig.Size()) -diff --git a/pkg/logql/syntax/parser_test.go b/pkg/logql/syntax/parser_test.go -index 7152d78adac12..faa55015e5838 100644 ---- a/pkg/logql/syntax/parser_test.go -+++ b/pkg/logql/syntax/parser_test.go -@@ -30,7 +30,7 @@ var ParseTestCases = []struct { - Left: &LogRange{ - Left: &PipelineExpr{ - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchRegexp, """", ""error\\""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""error\\""), - }, - Left: &MatchersExpr{ - Mts: []*labels.Matcher{ -@@ -60,7 +60,7 @@ var ParseTestCases = []struct { - Left: newPipelineExpr( - newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""foo"", Value: ""bar""}}), - MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""error""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""error""), - }, - ), - Interval: 12 * time.Hour, -@@ -75,7 +75,7 @@ var ParseTestCases = []struct { - Left: &LogRange{ - Left: newPipelineExpr( - newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""foo"", Value: ""bar""}}), -- MultiStageExpr{newLineFilterExpr(labels.MatchEqual, """", ""error"")}, -+ MultiStageExpr{newLineFilterExpr(log.LineMatchEqual, """", ""error"")}, - ), - Interval: 12 * time.Hour, - }, -@@ -392,8 +392,8 @@ var ParseTestCases = []struct { - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")}), - MultiStageExpr{ - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchEqual, OpFilterIP, ""123.123.123.123""), - ), - }, - ), -@@ -404,7 +404,7 @@ var ParseTestCases = []struct { - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", 
""bar""), mustNewMatcher(labels.MatchEqual, ""ip"", ""foo"")}), - MultiStageExpr{ - newLogfmtParserExpr(nil), -- newLineFilterExpr(labels.MatchEqual, OpFilterIP, ""127.0.0.1""), -+ newLineFilterExpr(log.LineMatchEqual, OpFilterIP, ""127.0.0.1""), - newLabelFilterExpr(log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, ""ip"", ""2.3.4.5""))), - newLabelFilterExpr(log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, ""ip"", ""abc""))), - newLabelFilterExpr(log.NewIPLabelFilter(""4.5.6.7"", ""ipaddr"", log.LabelFilterEqual)), -@@ -417,7 +417,7 @@ var ParseTestCases = []struct { - exp: newPipelineExpr( - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")}), - MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchEqual, OpFilterIP, ""123.123.123.123""), - }, - ), - }, -@@ -427,8 +427,8 @@ var ParseTestCases = []struct { - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")}), - MultiStageExpr{ - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, OpFilterIP, ""123.123.123.123""), -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), - ), - }, - ), -@@ -440,10 +440,10 @@ var ParseTestCases = []struct { - MultiStageExpr{ - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, OpFilterIP, ""123.123.123.123""), -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), - ), -- newLineFilterExpr(labels.MatchEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchEqual, OpFilterIP, ""123.123.123.123""), - ), - }, - ), -@@ -454,8 +454,8 @@ var ParseTestCases = []struct { - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")}), - MultiStageExpr{ - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchEqual, OpFilterIP, ""123.123.123.123""), - ), - }, - ), -@@ -465,7 +465,7 @@ var ParseTestCases = []struct { - exp: newPipelineExpr( - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")}), - MultiStageExpr{ -- newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchNotEqual, OpFilterIP, ""123.123.123.123""), - }, - ), - }, -@@ -475,8 +475,8 @@ var ParseTestCases = []struct { - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")}), - MultiStageExpr{ - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, ""123.123.123.123""), -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchNotEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), - ), - }, - ), -@@ -488,10 +488,10 @@ var ParseTestCases = []struct { - MultiStageExpr{ - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, ""123.123.123.123""), -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchNotEqual, OpFilterIP, ""123.123.123.123""), -+ 
newLineFilterExpr(log.LineMatchEqual, """", ""baz""), - ), -- newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, ""123.123.123.123""), -+ newLineFilterExpr(log.LineMatchNotEqual, OpFilterIP, ""123.123.123.123""), - ), - }, - ), -@@ -662,7 +662,7 @@ var ParseTestCases = []struct { - in: `{foo=""bar""} |= ""baz""`, - exp: newPipelineExpr( - newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")}), -- MultiStageExpr{newLineFilterExpr(labels.MatchEqual, """", ""baz"")}, -+ MultiStageExpr{newLineFilterExpr(log.LineMatchEqual, """", ""baz"")}, - ), - }, - { -@@ -673,12 +673,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -693,12 +693,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -715,12 +715,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -737,12 +737,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - newLabelParserExpr(OpParserTypeUnpack, """"), - }, -@@ -769,12 +769,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- 
newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -796,12 +796,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -824,12 +824,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -852,12 +852,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -882,12 +882,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -913,12 +913,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -935,12 +935,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - 
newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -963,12 +963,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -993,12 +993,12 @@ var ParseTestCases = []struct { - newNestedLineFilterExpr( - newNestedLineFilterExpr( - newNestedLineFilterExpr( -- newLineFilterExpr(labels.MatchEqual, """", ""baz""), -- newLineFilterExpr(labels.MatchRegexp, """", ""blip""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""baz""), -+ newLineFilterExpr(log.LineMatchRegexp, """", ""blip""), - ), -- newLineFilterExpr(labels.MatchNotEqual, """", ""flip""), -+ newLineFilterExpr(log.LineMatchNotEqual, """", ""flip""), - ), -- newLineFilterExpr(labels.MatchNotRegexp, """", ""flap""), -+ newLineFilterExpr(log.LineMatchNotRegexp, """", ""flap""), - ), - }, - ), -@@ -1257,7 +1257,7 @@ var ParseTestCases = []struct { - mustNewMatcher(labels.MatchEqual, ""namespace"", ""tns""), - }), - MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""level=error""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""level=error""), - }), - Interval: 5 * time.Minute, - }, OpRangeTypeCount, nil, nil), -@@ -1291,7 +1291,7 @@ var ParseTestCases = []struct { - mustNewMatcher(labels.MatchEqual, ""namespace"", ""tns""), - }), - MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""level=error""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""level=error""), - }), - Interval: 5 * time.Minute, - }, OpRangeTypeCount, nil, nil), -@@ -1368,7 +1368,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1387,7 +1387,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeUnpack, """"), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ -@@ -1407,7 +1407,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- 
newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewAndLabelFilter( -@@ -1426,7 +1426,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypePattern, "" bar ""), - &LabelFilterExpr{ - LabelFilterer: log.NewAndLabelFilter( -@@ -1445,7 +1445,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1464,7 +1464,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewAndLabelFilter( -@@ -1483,7 +1483,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1503,7 +1503,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1534,7 +1534,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLineFmtExpr(""blip{{ .foo }}blop""), - }, - }, -@@ -1545,7 +1545,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1566,7 +1566,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - 
LabelFilterer: log.NewOrLabelFilter( -@@ -1592,7 +1592,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1638,7 +1638,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1659,7 +1659,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1690,7 +1690,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1720,7 +1720,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""namespace"", Value: ""tns""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""level=error""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""level=error""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewAndLabelFilter( -@@ -1742,7 +1742,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""namespace"", Value: ""tns""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""level=error""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""level=error""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewAndLabelFilter( -@@ -1764,7 +1764,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""namespace"", Value: ""tns""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""level=error""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""level=error""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewAndLabelFilter( -@@ -1786,7 +1786,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""namespace"", Value: ""tns""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""level=error""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""level=error""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ 
- LabelFilterer: log.NewAndLabelFilter( -@@ -1808,7 +1808,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - }, - }, - 5*time.Minute, -@@ -1890,7 +1890,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1921,7 +1921,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1952,7 +1952,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -1983,7 +1983,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2018,7 +2018,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2057,7 +2057,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2096,7 +2096,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2135,7 +2135,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: 
newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2174,7 +2174,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2213,7 +2213,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2263,7 +2263,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2295,7 +2295,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2344,7 +2344,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2376,7 +2376,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2425,7 +2425,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2457,7 +2457,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: 
""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2506,7 +2506,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2538,7 +2538,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2655,7 +2655,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2687,7 +2687,7 @@ var ParseTestCases = []struct { - newLogRange(&PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - &LabelFilterExpr{ - LabelFilterer: log.NewOrLabelFilter( -@@ -2932,7 +2932,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""bar""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""bar""), - newLabelParserExpr(OpParserTypeJSON, """"), - }, - }, -@@ -2963,7 +2963,7 @@ var ParseTestCases = []struct { - exp: &PipelineExpr{ - Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: ""app"", Value: ""foo""}}), - MultiStages: MultiStageExpr{ -- newLineFilterExpr(labels.MatchEqual, """", ""#""), -+ newLineFilterExpr(log.LineMatchEqual, """", ""#""), - }, - }, - }, -@@ -3147,23 +3147,23 @@ var ParseTestCases = []struct { - Left: newOrLineFilter( - &LineFilterExpr{ - LineFilter: LineFilter{ -- Ty: labels.MatchEqual, -+ Ty: log.LineMatchEqual, - Match: ""foo"", - }, - }, - &LineFilterExpr{ - LineFilter: LineFilter{ -- Ty: labels.MatchEqual, -+ Ty: log.LineMatchEqual, - Match: ""bar"", - }, - }), - LineFilter: LineFilter{ -- Ty: labels.MatchEqual, -+ Ty: log.LineMatchEqual, - Match: ""buzz"", - }, - Or: &LineFilterExpr{ - LineFilter: LineFilter{ -- Ty: labels.MatchEqual, -+ Ty: log.LineMatchEqual, - Match: ""fizz"", - }, - IsOrChild: true, -diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go -index d5ad9ce705dea..ee896639f8a8c 100644 ---- a/pkg/querier/queryrange/roundtrip.go -+++ b/pkg/querier/queryrange/roundtrip.go -@@ -15,10 +15,10 @@ import ( - 
""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/common/model"" -- ""github.com/prometheus/prometheus/model/labels"" - - ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/logql"" -+ logqllog ""github.com/grafana/loki/pkg/logql/log"" - ""github.com/grafana/loki/pkg/logql/syntax"" - ""github.com/grafana/loki/pkg/logqlmodel/stats"" - base ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"" -@@ -387,7 +387,7 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, - func transformRegexQuery(req *http.Request, expr syntax.LogSelectorExpr) (syntax.LogSelectorExpr, error) { - regexp := req.Form.Get(""regexp"") - if regexp != """" { -- filterExpr, err := syntax.AddFilterExpr(expr, labels.MatchRegexp, """", regexp) -+ filterExpr, err := syntax.AddFilterExpr(expr, logqllog.LineMatchRegexp, """", regexp) - if err != nil { - return nil, err - } -diff --git a/pkg/storage/bloom/v1/bloom_tester.go b/pkg/storage/bloom/v1/bloom_tester.go -index 99b76c3a1a0d6..5aa688bfc2657 100644 ---- a/pkg/storage/bloom/v1/bloom_tester.go -+++ b/pkg/storage/bloom/v1/bloom_tester.go -@@ -3,7 +3,6 @@ package v1 - import ( - ""github.com/grafana/regexp"" - regexpsyntax ""github.com/grafana/regexp/syntax"" -- ""github.com/prometheus/prometheus/model/labels"" - - ""github.com/grafana/loki/pkg/logql/log"" - ""github.com/grafana/loki/pkg/logql/syntax"" -@@ -90,16 +89,16 @@ func FiltersToBloomTest(b NGramBuilder, filters ...syntax.LineFilterExpr) BloomT - - func simpleFilterToBloomTest(b NGramBuilder, filter syntax.LineFilter) BloomTest { - switch filter.Ty { -- case labels.MatchNotEqual, labels.MatchNotRegexp: -+ case log.LineMatchNotEqual, log.LineMatchNotRegexp: - // We cannot test _negated_ filters with a bloom filter since blooms are probabilistic - // filters that can only tell us if a string _might_ exist. - // For example, for `!= ""foo""`, the bloom filter might tell us that the string ""foo"" might exist - // but because we are not sure, we cannot discard that chunk because it might actually not be there. - // Therefore, we return a test that always returns true. - return MatchAll -- case labels.MatchEqual: -+ case log.LineMatchEqual: - return newStringTest(b, filter.Match) -- case labels.MatchRegexp: -+ case log.LineMatchRegexp: - reg, err := regexpsyntax.Parse(filter.Match, regexpsyntax.Perl) - if err != nil { - // TODO: log error",chore,refactor line filter MatchType (#12388) -18108faaf335d96e1ab187a7d40a8fe00c1d998d,2025-01-17 23:30:53,Christian Haudum,"docs: Update titles of pages within the ""Manage"" section (#15814) - -Signed-off-by: Christian Haudum -Signed-off-by: J Stickler -Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/operations/authentication.md b/docs/sources/operations/authentication.md -index 44ef07c9bc391..22daf7dbf3a76 100644 ---- a/docs/sources/operations/authentication.md -+++ b/docs/sources/operations/authentication.md -@@ -1,10 +1,10 @@ - --- --title: Authentication --menuTitle: --description: Describes Loki authentication. -+title: Manage authentication -+menuTitle: Authentication -+description: Describes how to add authentication to Grafana Loki. - weight: - --- --# Authentication -+# Manage authentication - - Grafana Loki does not come with any included authentication layer. Operators are - expected to run an authenticating reverse proxy in front of your services. 
-diff --git a/docs/sources/operations/automatic-stream-sharding.md b/docs/sources/operations/automatic-stream-sharding.md -index 2e876c89197f7..e2832650a3db0 100644 ---- a/docs/sources/operations/automatic-stream-sharding.md -+++ b/docs/sources/operations/automatic-stream-sharding.md -@@ -1,14 +1,14 @@ - --- --title: Automatic stream sharding -+title: Manage large volume log streams with automatic stream sharding - menuTitle: Automatic stream sharding - description: Describes how to control issues around the per-stream rate limit using automatic stream sharding. - weight: - --- - --# Automatic stream sharding -+# Manage large volume log streams with automatic stream sharding - --Automatic stream sharding will attempt to keep streams under a `desired_rate` by adding new labels and values to --existing streams. When properly tuned, this should eliminate issues where log producers are rate limited due to the -+Automatic stream sharding can keep streams under a `desired_rate` by adding new labels and values to -+existing streams. When properly tuned, this can eliminate issues where log producers are rate limited due to the - per-stream rate limit. - - **To enable automatic stream sharding:** -diff --git a/docs/sources/operations/autoscaling_queriers.md b/docs/sources/operations/autoscaling_queriers.md -index 908a625016807..86deaa2caa05a 100644 ---- a/docs/sources/operations/autoscaling_queriers.md -+++ b/docs/sources/operations/autoscaling_queriers.md -@@ -1,11 +1,10 @@ - --- --title: Autoscaling Loki queriers -+title: Manage varying workloads at scale with autoscaling queriers - menuTitle: Autoscaling queriers - description: Describes how to use KEDA to autoscale the quantity of queriers for a microsevices mode Kubernetes deployment. - weight: - --- -- --# Autoscaling Loki queriers -+# Manage varying workloads at scale with autoscaling queriers - - A microservices deployment of a Loki cluster that runs on Kubernetes typically handles a - workload that varies throughout the day. -diff --git a/docs/sources/operations/blocking-queries.md b/docs/sources/operations/blocking-queries.md -index ac286bbb3f849..1af8bec04beba 100644 ---- a/docs/sources/operations/blocking-queries.md -+++ b/docs/sources/operations/blocking-queries.md -@@ -1,10 +1,10 @@ - --- --title: Blocking Queries --menuTitle: --description: Describes how to configure Loki to block expensive queries using per-tenant overrides. -+title: Block unwanted queries -+menuTitle: Unwanted queries -+description: Describes how to configure Grafana Loki to block unwanted or expensive queries using per-tenant overrides. - weight: - --- --# Blocking Queries -+# Block unwanted queries - - In certain situations, you may not be able to control the queries being sent to your Loki installation. These queries - may be intentionally or unintentionally expensive to run, and they may affect the overall stability or cost of running -diff --git a/docs/sources/operations/bloom-filters.md b/docs/sources/operations/bloom-filters.md -index e1a09cdebcb07..c1030f63fe1e3 100644 ---- a/docs/sources/operations/bloom-filters.md -+++ b/docs/sources/operations/bloom-filters.md -@@ -1,5 +1,5 @@ - --- --title: Bloom filters (Experimental) -+title: Manage bloom filter building and querying (Experimental) - menuTitle: Bloom filters - description: Describes how to enable and configure query acceleration with bloom filters. 
- weight: -@@ -9,13 +9,12 @@ keywords: - aliases: - - ./query-acceleration-blooms - --- -- --# Bloom filters (Experimental) -+# Manage bloom filter building and querying (Experimental) - - {{< admonition type=""warning"" >}} - In Loki and Grafana Enterprise Logs (GEL), Query acceleration using blooms is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. No SLA is provided. Note that this feature is intended for users who are ingesting more than 75TB of logs a month, as it is designed to accelerate queries against large volumes of logs. - --In Grafana Cloud, Query acceleration using Bloom filters is enabled as a [public preview](/docs/release-life-cycle/) for select large-scale customers that are ingesting more that 75TB of logs a month. Limited support and no SLA are provided. -+In Grafana Cloud, Query acceleration using bloom filters is enabled as a [public preview](/docs/release-life-cycle/) for select large-scale customers that are ingesting more that 75TB of logs a month. Limited support and no SLA are provided. - {{< /admonition >}} - - Loki leverages [bloom filters](https://en.wikipedia.org/wiki/Bloom_filter) to speed up queries by reducing the amount of data Loki needs to load from the store and iterate through. -diff --git a/docs/sources/operations/caching.md b/docs/sources/operations/caching.md -index 53a132db58352..e401eb020e4c8 100644 ---- a/docs/sources/operations/caching.md -+++ b/docs/sources/operations/caching.md -@@ -1,14 +1,13 @@ - --- --title: Caching -+title: Configure caches to speed up queries - menuTitle: Caching --description: Describes how to enable and configure memcached to speed query performance. -+description: Describes how to enable and configure memcached to improve query performance. - weight: - keywords: - - memcached - - caching - --- -- --# Caching -+# Configure caches to speed up queries - - Loki supports caching of index writes and lookups, chunks and query results to - speed up query performance. This sections describes the recommended Memcached -diff --git a/docs/sources/operations/loki-canary/_index.md b/docs/sources/operations/loki-canary/_index.md -index f6c1bf23a9388..6fb18529357c6 100644 ---- a/docs/sources/operations/loki-canary/_index.md -+++ b/docs/sources/operations/loki-canary/_index.md -@@ -1,10 +1,10 @@ - --- --title: Loki Canary --menuTitle: -+title: Audit data propagation latency and correctness using Loki Canary -+menuTitle: Loki Canary - description: Describes how to use Loki Canary to audit the log-capturing performance of a Grafana Loki cluster to ensure Loki is ingesting logs without data loss. - weight: - --- --# Loki Canary -+# Audit data propagation latency and correctness using Loki Canary - - Loki Canary is a standalone app that audits the log-capturing performance of a Grafana Loki cluster. - This component emits and periodically queries for logs, making sure that Loki is ingesting logs without any data loss. -diff --git a/docs/sources/operations/meta-monitoring/_index.md b/docs/sources/operations/meta-monitoring/_index.md -index 7b90955ef2ad4..12906a926b15e 100644 ---- a/docs/sources/operations/meta-monitoring/_index.md -+++ b/docs/sources/operations/meta-monitoring/_index.md -@@ -1,11 +1,11 @@ - --- --title: Monitor Loki -+title: Collect metrics and logs of your Loki cluster -+menuTitle: Monitor Loki - description: Describes the various options for monitoring your Loki environment, and the metrics available. 
- aliases: - - ../operations/observability - --- -- --# Monitor Loki -+# Collect metrics and logs of your Loki cluster - - As part of your Loki implementation, you will also want to monitor your Loki cluster. - -diff --git a/docs/sources/operations/meta-monitoring/mixins.md b/docs/sources/operations/meta-monitoring/mixins.md -index a4a819c4e3d28..d95cae5861497 100644 ---- a/docs/sources/operations/meta-monitoring/mixins.md -+++ b/docs/sources/operations/meta-monitoring/mixins.md -@@ -1,11 +1,10 @@ - --- --title: Install Loki mixins --menuTitle: Install mixins --description: Describes the Loki mixins, how to configure and install the dashboards, alerts, and recording rules. -+title: Install dashboards, alerts, and recording rules -+menuTitle: Mixins -+description: Describes the Loki mixins, how to configure and install the dashboards, alerts, and recording rules. - weight: 100 - --- -- --# Install Loki mixins -+# Install dashboards, alerts, and recording rules - - Loki is instrumented to expose metrics about itself via the `/metrics` endpoint, designed to be scraped by Prometheus. Each Loki release includes a mixin. The Loki mixin provides a set of Grafana dashboards, Prometheus recording rules and alerts for monitoring Loki. - -diff --git a/docs/sources/operations/multi-tenancy.md b/docs/sources/operations/multi-tenancy.md -index bcd61ded9237c..97594795446be 100644 ---- a/docs/sources/operations/multi-tenancy.md -+++ b/docs/sources/operations/multi-tenancy.md -@@ -1,10 +1,10 @@ - --- --title: Multi-tenancy --menuTitle: --description: Describes how Loki implements multi-tenancy to isolate tenant data and queries. -+title: Manage tenant isolation -+menuTitle: Multi-tenancy -+description: Describes how Grafana Loki implements multi-tenancy to isolate tenant data and queries. - weight: - --- --# Multi-tenancy -+# Manage tenant isolation - - Grafana Loki is a multi-tenant system; requests and data for tenant A are isolated from - tenant B. Requests to the Loki API should include an HTTP header -diff --git a/docs/sources/operations/overrides-exporter.md b/docs/sources/operations/overrides-exporter.md -index ef645ca28efde..a467bff64a91e 100644 ---- a/docs/sources/operations/overrides-exporter.md -+++ b/docs/sources/operations/overrides-exporter.md -@@ -1,11 +1,11 @@ - --- --title: Overrides exporter --menuTitle: --description: Describes how the Overrides Exporter module exposes tenant limits as Prometheus metrics. -+title: Monitor tenant limits using the Overrides Exporter -+menuTitle: Overrides Exporter -+description: Describes how the Overrides Exporter exposes tenant limits as Prometheus metrics. - weight: - --- - --# Overrides exporter -+# Monitor tenant limits using the Overrides Exporter - - Loki is a multi-tenant system that supports applying limits to each tenant as a mechanism for resource management. The `overrides-exporter` module exposes these limits as Prometheus metrics in order to help operators better understand tenant behavior. - -diff --git a/docs/sources/operations/query-fairness/_index.md b/docs/sources/operations/query-fairness/_index.md -index 79c569d5de723..655802629b82c 100644 ---- a/docs/sources/operations/query-fairness/_index.md -+++ b/docs/sources/operations/query-fairness/_index.md -@@ -1,11 +1,10 @@ - --- --title: Query fairness within tenants -+title: Ensure query fairness within tenants using actors - menuTitle: Query fairness - description: Describes methods for guaranteeing query fairness across multiple actors within a single tenant using the scheduler. 
- weight: - --- -- --# Query fairness within tenants -+# Ensure query fairness within tenants using actors - - Loki uses [shuffle sharding]({{< relref ""../shuffle-sharding/_index.md"" >}}) - to minimize impact across tenants in case of querier failures or misbehaving -diff --git a/docs/sources/operations/recording-rules.md b/docs/sources/operations/recording-rules.md -index 8c335740d5af6..b6c18ee1e09ca 100644 ---- a/docs/sources/operations/recording-rules.md -+++ b/docs/sources/operations/recording-rules.md -@@ -1,11 +1,12 @@ - --- --title: Recording Rules --menuTitle: --description: Working with recording rules. -+title: Manage recording rules -+menuTitle: Recording rules -+description: Describes how to setup and use recording rules in Grafana Loki. - weight: - --- -+# Manage recording rules - --# Recording Rules -+Recording rules are queries that run in an interval and produce metrics from logs that can be pushed to a Prometheus compatible backend. - - Recording rules are evaluated by the `ruler` component. Each `ruler` acts as its own `querier`, in the sense that it - executes queries against the store without using the `query-frontend` or `querier` components. It will respect all query -diff --git a/docs/sources/operations/request-validation-rate-limits.md b/docs/sources/operations/request-validation-rate-limits.md -index cb602c17c2292..6f631a25c5f57 100644 ---- a/docs/sources/operations/request-validation-rate-limits.md -+++ b/docs/sources/operations/request-validation-rate-limits.md -@@ -1,17 +1,16 @@ - --- --title: Request Validation and Rate-Limit Errors --menuTitle: --description: Request Validation and Rate-Limit Errors -+title: Enforce rate limits and push request validation -+menuTitle: Rate limits -+description: Decribes the different rate limits and push request validation and their error handling. - weight: - --- -+# Enforce rate limits and push request validation - --# Request Validation and Rate-Limit Errors -- --Loki will reject requests if they exceed a usage threshold (rate-limit error) or if they are invalid (validation error). -+Loki will reject requests if they exceed a usage threshold (rate limit error) or if they are invalid (validation error). - - All occurrences of these errors can be observed using the `loki_discarded_samples_total` and `loki_discarded_bytes_total` metrics. The sections below describe the various possible reasons specified in the `reason` label of these metrics. - --It is recommended that Loki operators set up alerts or dashboards with these metrics to detect when rate-limits or validation errors occur. -+It is recommended that Loki operators set up alerts or dashboards with these metrics to detect when rate limits or validation errors occur. - - - ### Terminology -@@ -26,7 +25,7 @@ Rate-limits are enforced when Loki cannot handle more requests from a tenant. - - ### `rate_limited` - --This rate-limit is enforced when a tenant has exceeded their configured log ingestion rate-limit. -+This rate limit is enforced when a tenant has exceeded their configured log ingestion rate limit. - - One solution if you're seeing samples dropped due to `rate_limited` is simply to increase the rate limits on your Loki cluster. These limits can be modified globally in the [`limits_config`](/docs/loki//configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki//configuration/#runtime-configuration-file) file. The config options to use are `ingestion_rate_mb` and `ingestion_burst_size_mb`. 
- -@@ -46,9 +45,9 @@ Note that you'll want to make sure your Loki cluster has sufficient resources pr - - ### `per_stream_rate_limit` - --This limit is enforced when a single stream reaches its rate-limit. -+This limit is enforced when a single stream reaches its rate limit. - --Each stream has a rate-limit applied to it to prevent individual streams from overwhelming the set of ingesters it is distributed to (the size of that set is equal to the `replication_factor` value). -+Each stream has a rate limit applied to it to prevent individual streams from overwhelming the set of ingesters it is distributed to (the size of that set is equal to the `replication_factor` value). - - This value can be modified globally in the [`limits_config`](/docs/loki//configuration/#limits_config) block, or on a per-tenant basis in the [runtime overrides](/docs/loki//configuration/#runtime-configuration-file) file. The config options to adjust are `per_stream_rate_limit` and `per_stream_rate_limit_burst`. - -diff --git a/docs/sources/operations/scalability.md b/docs/sources/operations/scalability.md -index 1cc6e87d12640..6916b60cd12f7 100644 ---- a/docs/sources/operations/scalability.md -+++ b/docs/sources/operations/scalability.md -@@ -1,13 +1,13 @@ - --- --title: Scale Loki --menuTitle: Scale --description: Describes how to scale Grafana Loki -+title: Manage larger production deployments -+menuTitle: Scale Loki -+description: Describes strategies how to scale a Loki deployment when log volume increases. - weight: - --- --# Scale Loki -+# Manage larger production deployments - --When scaling Loki, operators should consider running several Loki processes --partitioned by role (ingester, distributor, querier) rather than a single Loki -+When needing to scale Loki due to increased log volume, operators should consider running several Loki processes -+partitioned by role (ingester, distributor, querier, and so on) rather than a single Loki - process. Grafana Labs' [production setup](https://github.com/grafana/loki/blob/main/production/ksonnet/loki) - contains `.libsonnet` files that demonstrates configuring separate components - and scaling for resource usage. -diff --git a/docs/sources/operations/shuffle-sharding/_index.md b/docs/sources/operations/shuffle-sharding/_index.md -index 166fd1992d583..aa658b69d5599 100644 ---- a/docs/sources/operations/shuffle-sharding/_index.md -+++ b/docs/sources/operations/shuffle-sharding/_index.md -@@ -1,11 +1,10 @@ - --- --title: Shuffle sharding -+title: Isolate tenant workflows using shuffle sharding - menuTitle: Shuffle sharding - description: Describes how to isolate tenant workloads from other tenant workloads using shuffle sharding to provide a better sharing of resources. - weight: - --- -- --# Shuffle sharding -+# Isolate tenant workflows using shuffle sharding - - Shuffle sharding is a resource-management technique used to isolate tenant workloads from other tenant workloads, to give each tenant more of a single-tenant experience when running in a shared cluster. - This technique is explained by AWS in their article [Workload isolation using shuffle-sharding](https://aws.amazon.com/builders-library/workload-isolation-using-shuffle-sharding/). 
-diff --git a/docs/sources/operations/troubleshooting.md b/docs/sources/operations/troubleshooting.md -index d99436e181a00..04d5be0712d82 100644 ---- a/docs/sources/operations/troubleshooting.md -+++ b/docs/sources/operations/troubleshooting.md -@@ -1,12 +1,12 @@ - --- --title: Troubleshooting Loki --menuTitle: Troubleshooting --description: Describes how to troubleshoot Grafana Loki. -+title: Manage and debug errors -+menuTitle: Troubleshooting -+description: Describes how to troubleshoot and debug specific errors in Grafana Loki. - weight: - aliases: - - /docs/loki/latest/getting-started/troubleshooting/ - --- --# Troubleshooting Loki -+# Manage and debug errors - - ## ""Loki: Bad Gateway. 502"" - -diff --git a/docs/sources/operations/upgrade.md b/docs/sources/operations/upgrade.md -index 8b47232dff5bb..22c5cceeaf9f5 100644 ---- a/docs/sources/operations/upgrade.md -+++ b/docs/sources/operations/upgrade.md -@@ -1,10 +1,10 @@ - --- --title: Upgrade --description: Links to Loki upgrade documentation. -+title: Manage version upgrades -+menuTitle: Upgrade -+description: Links to Grafana Loki upgrade documentation. - weight: - --- -- --# Upgrade -+# Manage version upgrades - - - [Upgrade](https://grafana.com/docs/loki//setup/upgrade/) from one Loki version to a newer version. - -diff --git a/docs/sources/operations/zone-ingesters.md b/docs/sources/operations/zone-ingesters.md -index 7467f16ca09f3..51913da5e3b9e 100644 ---- a/docs/sources/operations/zone-ingesters.md -+++ b/docs/sources/operations/zone-ingesters.md -@@ -1,11 +1,10 @@ - --- --title: Zone aware ingesters --menuTitle: --description: Describes how to migrate from a single ingester StatefulSet to three zone aware ingester StatefulSets -+title: Speed up ingester rollout using zone awareness -+menuTitle: Zone aware ingesters -+description: Describes how to migrate from a single ingester StatefulSet to three zone aware ingester StatefulSets. - weight: - --- -- --# Zone aware ingesters -+# Speed up ingester rollout using zone awareness - - The Loki zone aware ingesters are used by Grafana Labs in order to allow for easier rollouts of large Loki deployments. You can think of them as three logical zones, however with some extra Kubernetes configuration you could deploy them in separate zones. - -@@ -111,4 +110,4 @@ These instructions assume you are using the zone aware ingester jsonnet deployme - - 1. clean up any remaining temporary config from the migration, for example `multi_zone_ingester_migration_enabled: true` is no longer needed. - --1. ensure that all the old default ingester PVC/PV are removed. -\ No newline at end of file -+1. ensure that all the old default ingester PVC/PV are removed.",docs,"Update titles of pages within the ""Manage"" section (#15814) - -Signed-off-by: Christian Haudum -Signed-off-by: J Stickler -Co-authored-by: J Stickler " -96130ae38923a69968eb8dc3de5975a90043e117,2021-11-02 05:45:35,Karen Miller,Docs: revise incendiary language added in PR 4507 (#4611),False,"diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md -index b1e0050bf1d93..5d8c5d1913057 100644 ---- a/docs/sources/clients/promtail/configuration.md -+++ b/docs/sources/clients/promtail/configuration.md -@@ -277,7 +277,7 @@ external_labels: - # A comma-separated list of labels to include in the stream lag metric `promtail_stream_lag_seconds`. - # The default value is ""filename"". A ""host"" label is always included. 
- # The stream lag metric indicates which streams are falling behind on writes to Loki; --# be mindful about not using too many labels here as it can explode cardinality. -+# be mindful about using too many labels, as it can increase cardinality. - [stream_lag_labels: | default = ""filename""] - ```",Docs,revise incendiary language added in PR 4507 (#4611) -df61482207eb8f44f43d9c2ef4f450fc0c9a00ee,2024-08-08 03:42:13,renovate[bot],"fix(deps): update module golang.org/x/text to v0.17.0 (main) (#13794) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 4afdf29a01459..8a9903dfa1104 100644 ---- a/go.mod -+++ b/go.mod -@@ -145,7 +145,7 @@ require ( - go4.org/netipx v0.0.0-20230125063823-8449b0a6169f - golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 - golang.org/x/oauth2 v0.21.0 -- golang.org/x/text v0.16.0 -+ golang.org/x/text v0.17.0 - google.golang.org/protobuf v1.34.2 - gotest.tools v2.2.0+incompatible - k8s.io/apimachinery v0.29.3 -diff --git a/go.sum b/go.sum -index 7c2261b084ab5..502a8d2f15daa 100644 ---- a/go.sum -+++ b/go.sum -@@ -2317,8 +2317,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= - golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= - golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= - golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= --golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= --golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= - golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= - golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= - golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE -index 6a66aea5eafe0..2a7cf70da6e49 100644 ---- a/vendor/golang.org/x/text/LICENSE -+++ b/vendor/golang.org/x/text/LICENSE -@@ -1,4 +1,4 @@ --Copyright (c) 2009 The Go Authors. All rights reserved. -+Copyright 2009 The Go Authors. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are -@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. -- * Neither the name of Google Inc. nor the names of its -+ * Neither the name of Google LLC nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -diff --git a/vendor/modules.txt b/vendor/modules.txt -index e9210912543bf..701241662ea95 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -1826,7 +1826,7 @@ golang.org/x/sys/windows/svc/eventlog - # golang.org/x/term v0.22.0 - ## explicit; go 1.18 - golang.org/x/term --# golang.org/x/text v0.16.0 -+# golang.org/x/text v0.17.0 - ## explicit; go 1.18 - golang.org/x/text/cases - golang.org/x/text/encoding",fix,"update module golang.org/x/text to v0.17.0 (main) (#13794) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -2cc901ad69bfc635646cda399b62b3540f5af1f5,2024-07-11 18:30:45,Salva Corts,refactor(blooms): Apply retention in planner (#13484),False,"diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md -index 1597222310b87..38502dc2caae1 100644 ---- a/docs/sources/shared/configuration.md -+++ b/docs/sources/shared/configuration.md -@@ -645,6 +645,11 @@ bloom_build: - # CLI flag: -bloom-build.planner.max-tasks-per-tenant - [max_queued_tasks_per_tenant: | default = 30000] - -+ retention: -+ # Enable bloom retention. -+ # CLI flag: -bloom-build.planner.retention.enabled -+ [enabled: | default = false] -+ - builder: - # The grpc_client block configures the gRPC client used to communicate - # between a client and server component in Loki. -diff --git a/pkg/bloombuild/planner/config.go b/pkg/bloombuild/planner/config.go -index 03ed5d204e2a7..40ec5707ef715 100644 ---- a/pkg/bloombuild/planner/config.go -+++ b/pkg/bloombuild/planner/config.go -@@ -8,10 +8,11 @@ import ( - - // Config configures the bloom-planner component. - type Config struct { -- PlanningInterval time.Duration `yaml:""planning_interval""` -- MinTableOffset int `yaml:""min_table_offset""` -- MaxTableOffset int `yaml:""max_table_offset""` -- MaxQueuedTasksPerTenant int `yaml:""max_queued_tasks_per_tenant""` -+ PlanningInterval time.Duration `yaml:""planning_interval""` -+ MinTableOffset int `yaml:""min_table_offset""` -+ MaxTableOffset int `yaml:""max_table_offset""` -+ MaxQueuedTasksPerTenant int `yaml:""max_queued_tasks_per_tenant""` -+ RetentionConfig RetentionConfig `yaml:""retention""` - } - - // RegisterFlagsWithPrefix registers flags for the bloom-planner configuration. -@@ -26,6 +27,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - // I'm doing it the simple way for now. - f.IntVar(&cfg.MaxTableOffset, prefix+"".max-table-offset"", 2, ""Oldest day-table offset (from today, inclusive) to compact. This can be used to lower cost by not trying to compact older data which doesn't change. 
This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant."") - f.IntVar(&cfg.MaxQueuedTasksPerTenant, prefix+"".max-tasks-per-tenant"", 30000, ""Maximum number of tasks to queue per tenant."") -+ cfg.RetentionConfig.RegisterFlagsWithPrefix(prefix+"".retention"", f) - } - - func (cfg *Config) Validate() error { -@@ -33,10 +35,15 @@ func (cfg *Config) Validate() error { - return fmt.Errorf(""min-table-offset (%d) must be less than or equal to max-table-offset (%d)"", cfg.MinTableOffset, cfg.MaxTableOffset) - } - -+ if err := cfg.RetentionConfig.Validate(); err != nil { -+ return err -+ } -+ - return nil - } - - type Limits interface { -+ RetentionLimits - BloomCreationEnabled(tenantID string) bool - BloomSplitSeriesKeyspaceBy(tenantID string) int - BloomBuildMaxBuilders(tenantID string) int -diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go -index 77ae68687b35a..3523135780e55 100644 ---- a/pkg/bloombuild/planner/metrics.go -+++ b/pkg/bloombuild/planner/metrics.go -@@ -42,6 +42,13 @@ type Metrics struct { - tenantsDiscovered prometheus.Counter - tenantTasksPlanned *prometheus.GaugeVec - tenantTasksCompleted *prometheus.GaugeVec -+ -+ // Retention metrics -+ retentionRunning prometheus.Gauge -+ retentionTime *prometheus.HistogramVec -+ retentionDaysPerIteration *prometheus.HistogramVec -+ retentionTenantsPerIteration *prometheus.HistogramVec -+ retentionTenantsExceedingLookback prometheus.Gauge - } - - func NewMetrics( -@@ -161,6 +168,47 @@ func NewMetrics( - Name: ""tenant_tasks_completed"", - Help: ""Number of tasks completed for a tenant during the current build iteration."", - }, []string{""tenant"", ""status""}), -+ -+ // Retention -+ retentionRunning: promauto.With(r).NewGauge(prometheus.GaugeOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""retention_running"", -+ Help: ""1 if retention is running in this compactor."", -+ }), -+ -+ retentionTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""retention_time_seconds"", -+ Help: ""Time this retention process took to complete."", -+ Buckets: prometheus.DefBuckets, -+ }, []string{""status""}), -+ -+ retentionDaysPerIteration: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""retention_days_processed"", -+ Help: ""Number of days iterated over during the retention process."", -+ // 1day -> 5 years, 10 buckets -+ Buckets: prometheus.ExponentialBucketsRange(1, 365*5, 10), -+ }, []string{""status""}), -+ -+ retentionTenantsPerIteration: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""retention_tenants_processed"", -+ Help: ""Number of tenants on which retention was applied during the retention process."", -+ // 1 tenant -> 10k tenants, 10 buckets -+ Buckets: prometheus.ExponentialBucketsRange(1, 10000, 10), -+ }, []string{""status""}), -+ -+ retentionTenantsExceedingLookback: promauto.With(r).NewGauge(prometheus.GaugeOpts{ -+ Namespace: metricsNamespace, -+ Subsystem: metricsSubsystem, -+ Name: ""retention_tenants_exceeding_lookback"", -+ Help: ""Number of tenants with a retention exceeding the configured retention lookback."", -+ }), - } - } - -diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go -index 39ccfd2f7709d..08f6bb1c40bb7 100644 ---- 
a/pkg/bloombuild/planner/planner.go -+++ b/pkg/bloombuild/planner/planner.go -@@ -36,6 +36,7 @@ type Planner struct { - // Subservices manager. - subservices *services.Manager - subservicesWatcher *services.FailureWatcher -+ retentionManager *RetentionManager - - cfg Config - limits Limits -@@ -91,6 +92,14 @@ func New( - logger: logger, - } - -+ p.retentionManager = NewRetentionManager( -+ p.cfg.RetentionConfig, -+ p.limits, -+ p.bloomStore, -+ p.metrics, -+ p.logger, -+ ) -+ - svcs := []services.Service{p.tasksQueue, p.activeUsers} - p.subservices, err = services.NewManager(svcs...) - if err != nil { -@@ -184,6 +193,7 @@ type tenantTable struct { - - func (p *Planner) runOne(ctx context.Context) error { - var ( -+ wg sync.WaitGroup - start = time.Now() - status = statusFailure - ) -@@ -197,6 +207,16 @@ func (p *Planner) runOne(ctx context.Context) error { - }() - - p.metrics.buildStarted.Inc() -+ level.Info(p.logger).Log(""msg"", ""running bloom build iteration"") -+ -+ // Launch retention (will return instantly if retention is disabled) -+ wg.Add(1) -+ go func() { -+ defer wg.Done() -+ if err := p.retentionManager.Apply(ctx); err != nil { -+ level.Error(p.logger).Log(""msg"", ""failed apply retention"", ""err"", err) -+ } -+ }() - - tables := p.tables(time.Now()) - level.Debug(p.logger).Log(""msg"", ""loaded tables"", ""tables"", tables.TotalDays()) -@@ -265,7 +285,6 @@ func (p *Planner) runOne(ctx context.Context) error { - // TODO(salvacorts): This may end up creating too many goroutines. - // Create a pool of workers to process table-tenant tuples. - var tasksSucceed atomic.Int64 -- var wg sync.WaitGroup - for tt, results := range tasksResultForTenantTable { - if results.tasksToWait == 0 { - // No tasks enqueued for this tenant-table tuple, skip processing -diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go -index 0e119cc1af229..ab34c82c6940d 100644 ---- a/pkg/bloombuild/planner/planner_test.go -+++ b/pkg/bloombuild/planner/planner_test.go -@@ -1019,6 +1019,7 @@ func (f *fakeBuilder) Recv() (*protos.BuilderToPlanner, error) { - } - - type fakeLimits struct { -+ Limits - timeout time.Duration - maxRetries int - } -diff --git a/pkg/bloombuild/planner/retention.go b/pkg/bloombuild/planner/retention.go -new file mode 100644 -index 0000000000000..8a937d332a42f ---- /dev/null -+++ b/pkg/bloombuild/planner/retention.go -@@ -0,0 +1,262 @@ -+package planner -+ -+import ( -+ ""context"" -+ ""flag"" -+ ""math"" -+ ""slices"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/go-kit/log/level"" -+ ""github.com/pkg/errors"" -+ ""github.com/prometheus/common/model"" -+ -+ ""github.com/grafana/loki/v3/pkg/storage/chunk/client"" -+ storageconfig ""github.com/grafana/loki/v3/pkg/storage/config"" -+ ""github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"" -+ ""github.com/grafana/loki/v3/pkg/validation"" -+) -+ -+type RetentionConfig struct { -+ Enabled bool `yaml:""enabled""` -+ MaxLookbackDays int `yaml:""max_lookback_days"" doc:""hidden""` -+} -+ -+func (cfg *RetentionConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { -+ f.BoolVar(&cfg.Enabled, prefix+"".enabled"", false, ""Enable bloom retention."") -+ f.IntVar(&cfg.MaxLookbackDays, prefix+"".max-lookback-days"", 365, ""Max lookback days for retention."") -+} -+ -+func (cfg *RetentionConfig) Validate() error { -+ if !cfg.Enabled { -+ return nil -+ } -+ -+ if cfg.MaxLookbackDays < 1 { -+ return errors.New(""max lookback days must be a positive number"") -+ } -+ return nil 
-+} -+ -+type RetentionLimits interface { -+ RetentionPeriod(userID string) time.Duration -+ StreamRetention(userID string) []validation.StreamRetention -+ AllByUserID() map[string]*validation.Limits -+ DefaultLimits() *validation.Limits -+} -+ -+type RetentionManager struct { -+ cfg RetentionConfig -+ limits RetentionLimits -+ bloomStore bloomshipper.StoreBase -+ metrics *Metrics -+ logger log.Logger -+ lastDayRun storageconfig.DayTime -+ -+ // For testing -+ now func() model.Time -+} -+ -+func NewRetentionManager( -+ cfg RetentionConfig, -+ limits RetentionLimits, -+ bloomStore bloomshipper.StoreBase, -+ metrics *Metrics, -+ logger log.Logger, -+) *RetentionManager { -+ return &RetentionManager{ -+ cfg: cfg, -+ limits: limits, -+ bloomStore: bloomStore, -+ metrics: metrics, -+ logger: log.With(logger, ""subcomponent"", ""retention-manager""), -+ now: model.Now, -+ lastDayRun: storageconfig.NewDayTime(0), -+ } -+} -+ -+func (r *RetentionManager) Apply(ctx context.Context) error { -+ if !r.cfg.Enabled { -+ level.Debug(r.logger).Log(""msg"", ""retention is disabled"") -+ return nil -+ } -+ -+ start := r.now() -+ today := storageconfig.NewDayTime(start) -+ if !today.After(r.lastDayRun) { -+ // We've already run retention for today -+ return nil -+ } -+ -+ level.Info(r.logger).Log(""msg"", ""Applying retention"", ""today"", today.String(), ""lastDayRun"", r.lastDayRun.String()) -+ r.metrics.retentionRunning.Set(1) -+ defer r.metrics.retentionRunning.Set(0) -+ -+ tenantsRetention := retentionByTenant(r.limits) -+ r.reportTenantsExceedingLookback(tenantsRetention) -+ -+ defaultLimits := r.limits.DefaultLimits() -+ defaultRetention := findLongestRetention(time.Duration(defaultLimits.RetentionPeriod), defaultLimits.StreamRetention) -+ -+ smallestRetention := smallestEnabledRetention(defaultRetention, tenantsRetention) -+ if smallestRetention == 0 { -+ level.Debug(r.logger).Log(""msg"", ""no retention period set for any tenant, skipping retention"") -+ return nil -+ } -+ -+ // Start day is today minus the smallest retention period. -+ // Note that the last retention day is exclusive. E.g. 30 days retention means we keep 30 days of data, -+ // thus we start deleting data from the 31st day onwards. 
-+ startDay := storageconfig.NewDayTime(today.Add(-smallestRetention)).Dec() -+ // End day is today minus the max lookback days -+ endDay := storageconfig.NewDayTime(today.Add(-time.Duration(r.cfg.MaxLookbackDays) * 24 * time.Hour)) -+ -+ var daysProcessed int -+ tenantsRetentionApplied := make(map[string]struct{}, 100) -+ for day := startDay; day.After(endDay); day = day.Dec() { -+ dayLogger := log.With(r.logger, ""day"", day.String()) -+ bloomClient, err := r.bloomStore.Client(day.ModelTime()) -+ if err != nil { -+ level.Error(dayLogger).Log(""msg"", ""failed to get bloom store client"", ""err"", err) -+ break -+ } -+ objectClient := bloomClient.ObjectClient() -+ -+ tenants, err := r.bloomStore.TenantFilesForInterval( -+ ctx, bloomshipper.NewInterval(day.Bounds()), -+ func(tenant string, _ client.StorageObject) bool { -+ // Filter out tenants whose retention hasn't expired yet -+ globalRetention := r.limits.RetentionPeriod(tenant) -+ streamRetention := r.limits.StreamRetention(tenant) -+ tenantRetention := findLongestRetention(globalRetention, streamRetention) -+ expirationDay := storageconfig.NewDayTime(today.Add(-tenantRetention)) -+ return day.Before(expirationDay) -+ }, -+ ) -+ if err != nil { -+ r.metrics.retentionTime.WithLabelValues(statusFailure).Observe(time.Since(start.Time()).Seconds()) -+ r.metrics.retentionDaysPerIteration.WithLabelValues(statusFailure).Observe(float64(daysProcessed)) -+ r.metrics.retentionTenantsPerIteration.WithLabelValues(statusFailure).Observe(float64(len(tenantsRetentionApplied))) -+ return errors.Wrap(err, ""getting users for period"") -+ } -+ -+ if len(tenants) == 0 { -+ // No tenants for this day means we can break here since previous -+ // retention iterations have already deleted all tenants -+ break -+ } -+ -+ for tenant, objects := range tenants { -+ if len(objects) == 0 { -+ continue -+ } -+ -+ tenantLogger := log.With(dayLogger, ""tenant"", tenant) -+ level.Info(tenantLogger).Log(""msg"", ""applying retention to tenant"", ""keys"", len(objects)) -+ -+ // Note: we cannot delete the tenant directory directly because it is not an -+ // actual key in the object store. Instead, we need to delete all keys one by one. 
-+ for _, object := range objects { -+ if err := objectClient.DeleteObject(ctx, object.Key); err != nil { -+ r.metrics.retentionTime.WithLabelValues(statusFailure).Observe(time.Since(start.Time()).Seconds()) -+ r.metrics.retentionDaysPerIteration.WithLabelValues(statusFailure).Observe(float64(daysProcessed)) -+ r.metrics.retentionTenantsPerIteration.WithLabelValues(statusFailure).Observe(float64(len(tenantsRetentionApplied))) -+ return errors.Wrapf(err, ""deleting key %s"", object.Key) -+ } -+ } -+ -+ tenantsRetentionApplied[tenant] = struct{}{} -+ } -+ -+ daysProcessed++ -+ } -+ -+ r.lastDayRun = today -+ r.metrics.retentionTime.WithLabelValues(statusSuccess).Observe(time.Since(start.Time()).Seconds()) -+ r.metrics.retentionDaysPerIteration.WithLabelValues(statusSuccess).Observe(float64(daysProcessed)) -+ r.metrics.retentionTenantsPerIteration.WithLabelValues(statusSuccess).Observe(float64(len(tenantsRetentionApplied))) -+ level.Info(r.logger).Log(""msg"", ""finished applying retention"", ""daysProcessed"", daysProcessed, ""tenants"", len(tenantsRetentionApplied)) -+ -+ return nil -+} -+ -+func (r *RetentionManager) reportTenantsExceedingLookback(retentionByTenant map[string]time.Duration) { -+ if len(retentionByTenant) == 0 { -+ r.metrics.retentionTenantsExceedingLookback.Set(0) -+ return -+ } -+ -+ var tenantsExceedingLookback int -+ for tenant, retention := range retentionByTenant { -+ if retention > time.Duration(r.cfg.MaxLookbackDays)*24*time.Hour { -+ level.Warn(r.logger).Log(""msg"", ""tenant retention exceeds max lookback days"", ""tenant"", tenant, ""retention"", retention.String()) -+ } -+ tenantsExceedingLookback++ -+ } -+ -+ r.metrics.retentionTenantsExceedingLookback.Set(float64(tenantsExceedingLookback)) -+} -+ -+func findLongestRetention(globalRetention time.Duration, streamRetention []validation.StreamRetention) time.Duration { -+ if len(streamRetention) == 0 { -+ return globalRetention -+ } -+ -+ maxStreamRetention := slices.MaxFunc(streamRetention, func(a, b validation.StreamRetention) int { -+ return int(a.Period - b.Period) -+ }) -+ -+ if time.Duration(maxStreamRetention.Period) > globalRetention { -+ return time.Duration(maxStreamRetention.Period) -+ } -+ return globalRetention -+} -+ -+func retentionByTenant(limits RetentionLimits) map[string]time.Duration { -+ all := limits.AllByUserID() -+ if len(all) == 0 { -+ return nil -+ } -+ -+ retentions := make(map[string]time.Duration, len(all)) -+ for tenant, lim := range all { -+ retention := findLongestRetention(time.Duration(lim.RetentionPeriod), lim.StreamRetention) -+ if retention == 0 { -+ continue -+ } -+ retentions[tenant] = retention -+ } -+ -+ return retentions -+} -+ -+// smallestEnabledRetention returns the smallest retention period across all tenants and the default. 
-+func smallestEnabledRetention(defaultRetention time.Duration, perTenantRetention map[string]time.Duration) time.Duration { -+ if len(perTenantRetention) == 0 { -+ return defaultRetention -+ } -+ -+ smallest := time.Duration(math.MaxInt64) -+ if defaultRetention != 0 { -+ smallest = defaultRetention -+ } -+ -+ for _, retention := range perTenantRetention { -+ // Skip unlimited retention -+ if retention == 0 { -+ continue -+ } -+ -+ if retention < smallest { -+ smallest = retention -+ } -+ } -+ -+ if smallest == time.Duration(math.MaxInt64) { -+ // No tenant nor defaults configures a retention -+ return 0 -+ } -+ -+ return smallest -+} -diff --git a/pkg/bloombuild/planner/retention_test.go b/pkg/bloombuild/planner/retention_test.go -new file mode 100644 -index 0000000000000..15118aeca70ae ---- /dev/null -+++ b/pkg/bloombuild/planner/retention_test.go -@@ -0,0 +1,762 @@ -+package planner -+ -+import ( -+ ""context"" -+ ""math"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/prometheus/client_golang/prometheus"" -+ ""github.com/prometheus/common/model"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/v3/pkg/storage"" -+ v1 ""github.com/grafana/loki/v3/pkg/storage/bloom/v1"" -+ ""github.com/grafana/loki/v3/pkg/storage/chunk/cache"" -+ ""github.com/grafana/loki/v3/pkg/storage/chunk/client/local"" -+ storageconfig ""github.com/grafana/loki/v3/pkg/storage/config"" -+ ""github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"" -+ ""github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"" -+ ""github.com/grafana/loki/v3/pkg/storage/types"" -+ ""github.com/grafana/loki/v3/pkg/util/mempool"" -+ ""github.com/grafana/loki/v3/pkg/validation"" -+) -+ -+var testTime = parseDayTime(""2024-12-31"").ModelTime() -+ -+func TestRetention(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ cfg RetentionConfig -+ lim mockRetentionLimits -+ prePopulate func(t *testing.T, schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore) -+ expectErr bool -+ check func(t *testing.T, bloomStore *bloomshipper.BloomStore) -+ }{ -+ { -+ name: ""retention disabled"", -+ cfg: RetentionConfig{ -+ Enabled: false, -+ MaxLookbackDays: 2 * 365, -+ }, -+ lim: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ ""2"": 200 * 24 * time.Hour, -+ ""3"": 500 * 24 * time.Hour, -+ }, -+ }, -+ prePopulate: func(t *testing.T, schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore) { -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""1"", testTime, 200) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""2"", testTime, 50) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""3"", testTime, 500) -+ }, -+ check: func(t *testing.T, bloomStore *bloomshipper.BloomStore) { -+ metas := getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 200, len(metas[0])) -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""2"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 50, len(metas[0])) -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""3"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 500, len(metas[0])) -+ }, -+ }, -+ { -+ name: ""unlimited retention"", -+ cfg: RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 2 * 365, -+ }, -+ lim: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 0, -+ }, -+ }, -+ prePopulate: func(t *testing.T, 
schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore) { -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""1"", testTime, 200) -+ }, -+ check: func(t *testing.T, bloomStore *bloomshipper.BloomStore) { -+ metas := getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 200, len(metas[0])) -+ }, -+ }, -+ { -+ name: ""default retention"", -+ cfg: RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 2 * 365, -+ }, -+ lim: mockRetentionLimits{ -+ defaultRetention: 30 * 24 * time.Hour, -+ }, -+ prePopulate: func(t *testing.T, schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore) { -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""1"", testTime, 200) -+ }, -+ check: func(t *testing.T, bloomStore *bloomshipper.BloomStore) { -+ metas := getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 31, len(metas[0])) -+ }, -+ }, -+ { -+ name: ""retention lookback smaller than max retention"", -+ cfg: RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 100, -+ }, -+ lim: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ ""2"": 20 * 24 * time.Hour, -+ ""3"": 200 * 24 * time.Hour, -+ ""4"": 400 * 24 * time.Hour, -+ }, -+ streamRetention: map[string][]validation.StreamRetention{ -+ ""1"": { -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ { -+ Period: model.Duration(40 * 24 * time.Hour), -+ }, -+ }, -+ ""2"": { -+ { -+ Period: model.Duration(10 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ }, -+ prePopulate: func(t *testing.T, schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore) { -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""1"", testTime, 200) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""2"", testTime, 50) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""3"", testTime, 500) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""4"", testTime, 500) -+ }, -+ check: func(t *testing.T, bloomStore *bloomshipper.BloomStore) { -+ // Tenant 1 has 40 days of retention, and we wrote 200 days of metas -+ // We should get two groups: 0th-40th and 101th-200th -+ metas := getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 500) -+ require.Equal(t, 2, len(metas)) -+ require.Equal(t, 41, len(metas[0])) // 0-40th day -+ require.Equal(t, 100, len(metas[1])) // 100th-200th day -+ -+ // Tenant 2 has 20 days of retention, and we wrote 50 days of metas -+ // We should get one group: 0th-20th -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""2"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 21, len(metas[0])) // 0th-20th -+ -+ // Tenant 3 has 200 days of retention, and we wrote 500 days of metas -+ // Since the manager looks up to 100 days, we shouldn't have deleted any metas -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""3"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 500, len(metas[0])) // 0th-500th -+ -+ // Tenant 4 has 400 days of retention, and we wrote 500 days of metas -+ // Since the manager looks up to 100 days, we shouldn't have deleted any metas -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""4"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 500, len(metas[0])) // 0th-500th -+ }, -+ }, -+ { -+ name: ""retention lookback bigger than max retention"", -+ cfg: RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 2 * 365, -+ }, -+ lim: mockRetentionLimits{ 
-+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ ""2"": 20 * 24 * time.Hour, -+ ""3"": 200 * 24 * time.Hour, -+ ""4"": 400 * 24 * time.Hour, -+ }, -+ streamRetention: map[string][]validation.StreamRetention{ -+ ""1"": { -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ { -+ Period: model.Duration(40 * 24 * time.Hour), -+ }, -+ }, -+ ""2"": { -+ { -+ Period: model.Duration(10 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ }, -+ prePopulate: func(t *testing.T, schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore) { -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""1"", testTime, 200) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""2"", testTime, 50) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""3"", testTime, 500) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""4"", testTime, 500) -+ }, -+ check: func(t *testing.T, bloomStore *bloomshipper.BloomStore) { -+ // Tenant 1 has 40 days of retention, and we wrote 200 days of metas -+ // We should get one groups: 0th-40th -+ metas := getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 41, len(metas[0])) // 0-40th day -+ -+ // Tenant 2 has 20 days of retention, and we wrote 50 days of metas -+ // We should get one group: 0th-20th -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""2"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 21, len(metas[0])) // 0th-20th -+ -+ // Tenant 3 has 200 days of retention, and we wrote 500 days of metas -+ // We should get one group: 0th-200th -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""3"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 201, len(metas[0])) // 0th-200th -+ -+ // Tenant 4 has 400 days of retention, and we wrote 500 days of metas -+ // Since the manager looks up to 100 days, we shouldn't have deleted any metas -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""4"", testTime, 500) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 401, len(metas[0])) // 0th-400th -+ }, -+ }, -+ { -+ name: ""hit no tenants in table"", -+ cfg: RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 2 * 365, -+ }, -+ lim: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ }, -+ }, -+ prePopulate: func(t *testing.T, schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore) { -+ // Place metas with a gap of 50 days. 
[0th-100th], [151th-200th] -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""1"", testTime, 100) -+ putMetasForLastNDays(t, schemaCfg, bloomStore, ""1"", testTime.Add(-150*24*time.Hour), 50) -+ }, -+ check: func(t *testing.T, bloomStore *bloomshipper.BloomStore) { -+ // We should get two groups: 0th-30th and 151th-200th -+ metas := getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 500) -+ require.Equal(t, 2, len(metas)) -+ require.Equal(t, 31, len(metas[0])) // 0th-30th day -+ require.Equal(t, 50, len(metas[1])) // 151th-200th day -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ logger := log.NewNopLogger() -+ //logger := log.NewLogfmtLogger(os.Stdout) -+ -+ bloomStore, schema, _, err := NewMockBloomStore(t, logger) -+ require.NoError(t, err) -+ -+ rm := NewRetentionManager( -+ tc.cfg, -+ tc.lim, -+ bloomStore, -+ NewMetrics(nil, nil), -+ logger, -+ ) -+ rm.now = func() model.Time { -+ return testTime -+ } -+ -+ tc.prePopulate(t, schema, bloomStore) -+ -+ err = rm.Apply(context.Background()) -+ if tc.expectErr { -+ require.Error(t, err) -+ return -+ } -+ require.NoError(t, err) -+ -+ tc.check(t, bloomStore) -+ }) -+ } -+} -+ -+func TestRetentionRunsOncePerDay(t *testing.T) { -+ logger := log.NewNopLogger() -+ //logger := log.NewLogfmtLogger(os.Stdout) -+ -+ bloomStore, schema, _, err := NewMockBloomStore(t, logger) -+ require.NoError(t, err) -+ -+ rm := NewRetentionManager( -+ RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 365, -+ }, -+ mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ }, -+ }, -+ bloomStore, -+ NewMetrics(nil, nil), -+ logger, -+ ) -+ rm.now = func() model.Time { -+ return testTime -+ } -+ -+ // Write metas for the last 100 days and run retention -+ putMetasForLastNDays(t, schema, bloomStore, ""1"", testTime, 100) -+ err = rm.Apply(context.Background()) -+ require.NoError(t, err) -+ -+ // We should get only the first 30 days of metas -+ metas := getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 100) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 31, len(metas[0])) // 0th-30th day -+ -+ // We now change the now() time to be a bit later in the day -+ rm.now = func() model.Time { -+ return testTime.Add(1 * time.Hour) -+ } -+ -+ // Write metas again and run retention. 
Since we already ran retention at now()'s day, -+ // Apply should be a noop, and therefore we should be able to get all the 100 days of metas -+ putMetasForLastNDays(t, schema, bloomStore, ""1"", testTime, 100) -+ err = rm.Apply(context.Background()) -+ require.NoError(t, err) -+ -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 100) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 100, len(metas[0])) -+ -+ // We now change the now() time to be the next day, retention should run again -+ rm.now = func() model.Time { -+ return testTime.Add(24 * time.Hour) -+ } -+ err = rm.Apply(context.Background()) -+ require.NoError(t, err) -+ -+ // We should only see the first 30 days of metas -+ metas = getGroupedMetasForLastNDays(t, bloomStore, ""1"", testTime, 100) -+ require.Equal(t, 1, len(metas)) -+ require.Equal(t, 30, len(metas[0])) // 0th-30th day -+} -+ -+func TestFindLongestRetention(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ globalRetention time.Duration -+ streamRetention []validation.StreamRetention -+ expectedRetention time.Duration -+ }{ -+ { -+ name: ""no retention"", -+ expectedRetention: 0, -+ }, -+ { -+ name: ""global retention"", -+ globalRetention: 30 * 24 * time.Hour, -+ expectedRetention: 30 * 24 * time.Hour, -+ }, -+ { -+ name: ""stream retention"", -+ streamRetention: []validation.StreamRetention{ -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ }, -+ expectedRetention: 30 * 24 * time.Hour, -+ }, -+ { -+ name: ""two stream retention"", -+ streamRetention: []validation.StreamRetention{ -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ { -+ Period: model.Duration(40 * 24 * time.Hour), -+ }, -+ }, -+ expectedRetention: 40 * 24 * time.Hour, -+ }, -+ { -+ name: ""stream retention bigger than global"", -+ globalRetention: 20 * 24 * time.Hour, -+ streamRetention: []validation.StreamRetention{ -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ { -+ Period: model.Duration(40 * 24 * time.Hour), -+ }, -+ }, -+ expectedRetention: 40 * 24 * time.Hour, -+ }, -+ { -+ name: ""global retention bigger than stream"", -+ globalRetention: 40 * 24 * time.Hour, -+ streamRetention: []validation.StreamRetention{ -+ { -+ Period: model.Duration(20 * 24 * time.Hour), -+ }, -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ }, -+ expectedRetention: 40 * 24 * time.Hour, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ retention := findLongestRetention(tc.globalRetention, tc.streamRetention) -+ require.Equal(t, tc.expectedRetention, retention) -+ }) -+ } -+} -+ -+func TestSmallestRetention(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ limits RetentionLimits -+ expectedRetention time.Duration -+ expectedHasRetention bool -+ }{ -+ { -+ name: ""no retention"", -+ limits: mockRetentionLimits{}, -+ expectedRetention: 0, -+ }, -+ { -+ name: ""default global retention"", -+ limits: mockRetentionLimits{ -+ defaultRetention: 30 * 24 * time.Hour, -+ }, -+ expectedRetention: 30 * 24 * time.Hour, -+ }, -+ { -+ name: ""default stream retention"", -+ limits: mockRetentionLimits{ -+ defaultStreamRetention: []validation.StreamRetention{ -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ expectedRetention: 30 * 24 * time.Hour, -+ }, -+ { -+ name: ""tenant configured unlimited"", -+ limits: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 0, -+ }, -+ defaultRetention: 30 * 24 * time.Hour, -+ }, -+ expectedRetention: 30 * 24 * time.Hour, -+ }, -+ { -+ name: 
""no default one tenant"", -+ limits: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ }, -+ streamRetention: map[string][]validation.StreamRetention{ -+ ""1"": { -+ { -+ Period: model.Duration(40 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ }, -+ expectedRetention: 40 * 24 * time.Hour, -+ }, -+ { -+ name: ""no default two tenants"", -+ limits: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ ""2"": 20 * 24 * time.Hour, -+ }, -+ streamRetention: map[string][]validation.StreamRetention{ -+ ""1"": { -+ { -+ Period: model.Duration(40 * 24 * time.Hour), -+ }, -+ }, -+ ""2"": { -+ { -+ Period: model.Duration(10 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ }, -+ expectedRetention: 20 * 24 * time.Hour, -+ }, -+ { -+ name: ""default bigger than tenant"", -+ limits: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 10 * 24 * time.Hour, -+ }, -+ streamRetention: map[string][]validation.StreamRetention{ -+ ""1"": { -+ { -+ Period: model.Duration(20 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ defaultRetention: 40 * 24 * time.Hour, -+ defaultStreamRetention: []validation.StreamRetention{ -+ { -+ Period: model.Duration(30 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ expectedRetention: 20 * 24 * time.Hour, -+ }, -+ { -+ name: ""tenant bigger than default"", -+ limits: mockRetentionLimits{ -+ retention: map[string]time.Duration{ -+ ""1"": 30 * 24 * time.Hour, -+ }, -+ streamRetention: map[string][]validation.StreamRetention{ -+ ""1"": { -+ { -+ Period: model.Duration(40 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ defaultRetention: 10 * 24 * time.Hour, -+ defaultStreamRetention: []validation.StreamRetention{ -+ { -+ Period: model.Duration(20 * 24 * time.Hour), -+ }, -+ }, -+ }, -+ expectedRetention: 20 * 24 * time.Hour, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ defaultLim := tc.limits.DefaultLimits() -+ defaultRetention := findLongestRetention(time.Duration(defaultLim.RetentionPeriod), defaultLim.StreamRetention) -+ tenantsRetention := retentionByTenant(tc.limits) -+ -+ retention := smallestEnabledRetention(defaultRetention, tenantsRetention) -+ require.Equal(t, tc.expectedRetention, retention) -+ }) -+ } -+} -+ -+func TestRetentionConfigValidate(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ cfg RetentionConfig -+ expectErr bool -+ }{ -+ { -+ name: ""enabled and valid"", -+ cfg: RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 2 * 365, -+ }, -+ expectErr: false, -+ }, -+ { -+ name: ""invalid max lookback days"", -+ cfg: RetentionConfig{ -+ Enabled: true, -+ MaxLookbackDays: 0, -+ }, -+ expectErr: true, -+ }, -+ { -+ name: ""disabled and invalid"", -+ cfg: RetentionConfig{ -+ Enabled: false, -+ MaxLookbackDays: 0, -+ }, -+ expectErr: false, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ err := tc.cfg.Validate() -+ if tc.expectErr { -+ require.Error(t, err) -+ return -+ } -+ require.NoError(t, err) -+ }) -+ } -+} -+ -+func putMetasForLastNDays(t *testing.T, schemaCfg storageconfig.SchemaConfig, bloomStore *bloomshipper.BloomStore, tenant string, start model.Time, days int) { -+ const metasPerDay = 2 -+ -+ startDay := storageconfig.NewDayTime(start) -+ endDay := storageconfig.NewDayTime(startDay.Add(-time.Duration(days) * 24 * time.Hour)) -+ for day := startDay; day.After(endDay); day = day.Dec() { -+ period, err := schemaCfg.SchemaForTime(day.ModelTime()) -+ require.NoError(t, err) -+ -+ dayTable := storageconfig.NewDayTable(day, period.IndexTables.Prefix) -+ bloomClient, 
err := bloomStore.Client(dayTable.ModelTime()) -+ require.NoErrorf(t, err, ""failed to get bloom client for day %d: %s"", day, err) -+ -+ for i := 0; i < metasPerDay; i++ { -+ err = bloomClient.PutMeta(context.Background(), bloomshipper.Meta{ -+ MetaRef: bloomshipper.MetaRef{ -+ Ref: bloomshipper.Ref{ -+ TenantID: tenant, -+ TableName: dayTable.String(), -+ Bounds: v1.NewBounds(model.Fingerprint(i*100), model.Fingerprint(i*100+100)), -+ }, -+ }, -+ Blocks: []bloomshipper.BlockRef{}, -+ }) -+ require.NoError(t, err) -+ } -+ } -+} -+ -+// getMetasForLastNDays returns groups of continuous metas for the last N days. -+func getGroupedMetasForLastNDays(t *testing.T, bloomStore *bloomshipper.BloomStore, tenant string, start model.Time, days int) [][][]bloomshipper.Meta { -+ metasGrouped := make([][][]bloomshipper.Meta, 0) -+ currentGroup := make([][]bloomshipper.Meta, 0) -+ -+ startDay := storageconfig.NewDayTime(start) -+ endDay := storageconfig.NewDayTime(startDay.Add(-time.Duration(days) * 24 * time.Hour)) -+ -+ for day := startDay; day.After(endDay); day = day.Dec() { -+ metas, err := bloomStore.FetchMetas(context.Background(), bloomshipper.MetaSearchParams{ -+ TenantID: tenant, -+ Interval: bloomshipper.NewInterval(day.Bounds()), -+ Keyspace: v1.NewBounds(0, math.MaxUint64), -+ }) -+ require.NoError(t, err) -+ if len(metas) == 0 { -+ // We have reached the end of the metas group: cut a new group -+ if len(currentGroup) > 0 { -+ metasGrouped = append(metasGrouped, currentGroup) -+ currentGroup = make([][]bloomshipper.Meta, 0) -+ } -+ continue -+ } -+ currentGroup = append(currentGroup, metas) -+ } -+ -+ // Append the last group if it's not empty -+ if len(currentGroup) > 0 { -+ metasGrouped = append(metasGrouped, currentGroup) -+ } -+ -+ return metasGrouped -+} -+ -+func NewMockBloomStore(t *testing.T, logger log.Logger) (*bloomshipper.BloomStore, storageconfig.SchemaConfig, string, error) { -+ workDir := t.TempDir() -+ return NewMockBloomStoreWithWorkDir(t, workDir, logger) -+} -+ -+func NewMockBloomStoreWithWorkDir(t *testing.T, workDir string, logger log.Logger) (*bloomshipper.BloomStore, storageconfig.SchemaConfig, string, error) { -+ schemaCfg := storageconfig.SchemaConfig{ -+ Configs: []storageconfig.PeriodConfig{ -+ { -+ ObjectType: types.StorageTypeFileSystem, -+ From: storageconfig.DayTime{ -+ Time: testTime.Add(-2 * 365 * 24 * time.Hour), // -2 year -+ }, -+ IndexTables: storageconfig.IndexPeriodicTableConfig{ -+ PeriodicTableConfig: storageconfig.PeriodicTableConfig{ -+ Period: 24 * time.Hour, -+ Prefix: ""schema_a_table_"", -+ }}, -+ }, -+ { -+ ObjectType: types.StorageTypeFileSystem, -+ From: storageconfig.DayTime{ -+ Time: testTime.Add(-365 * 24 * time.Hour), // -1 year -+ }, -+ IndexTables: storageconfig.IndexPeriodicTableConfig{ -+ PeriodicTableConfig: storageconfig.PeriodicTableConfig{ -+ Period: 24 * time.Hour, -+ Prefix: ""schema_b_table_"", -+ }}, -+ }, -+ }, -+ } -+ -+ storageConfig := storage.Config{ -+ FSConfig: local.FSConfig{ -+ Directory: workDir, -+ }, -+ BloomShipperConfig: config.Config{ -+ WorkingDirectory: []string{workDir}, -+ DownloadParallelism: 1, -+ BlocksCache: config.BlocksCacheConfig{ -+ SoftLimit: 1 << 20, -+ HardLimit: 2 << 20, -+ TTL: time.Hour, -+ PurgeInterval: time.Hour, -+ }, -+ }, -+ } -+ -+ reg := prometheus.NewPedanticRegistry() -+ metrics := storage.NewClientMetrics() -+ t.Cleanup(metrics.Unregister) -+ -+ metasCache := cache.NewMockCache() -+ blocksCache := bloomshipper.NewFsBlocksCache(storageConfig.BloomShipperConfig.BlocksCache, 
prometheus.NewPedanticRegistry(), logger) -+ -+ store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageConfig, metrics, metasCache, blocksCache, &mempool.SimpleHeapAllocator{}, reg, logger) -+ if err == nil { -+ t.Cleanup(store.Stop) -+ } -+ -+ return store, schemaCfg, workDir, err -+} -+ -+type mockRetentionLimits struct { -+ retention map[string]time.Duration -+ streamRetention map[string][]validation.StreamRetention -+ defaultRetention time.Duration -+ defaultStreamRetention []validation.StreamRetention -+} -+ -+func (m mockRetentionLimits) RetentionPeriod(tenant string) time.Duration { -+ return m.retention[tenant] -+} -+ -+func (m mockRetentionLimits) StreamRetention(tenant string) []validation.StreamRetention { -+ return m.streamRetention[tenant] -+} -+ -+func (m mockRetentionLimits) AllByUserID() map[string]*validation.Limits { -+ tenants := make(map[string]*validation.Limits, len(m.retention)) -+ -+ for tenant, retention := range m.retention { -+ if _, ok := tenants[tenant]; !ok { -+ tenants[tenant] = &validation.Limits{} -+ } -+ tenants[tenant].RetentionPeriod = model.Duration(retention) -+ } -+ -+ for tenant, streamRetention := range m.streamRetention { -+ if _, ok := tenants[tenant]; !ok { -+ tenants[tenant] = &validation.Limits{} -+ } -+ tenants[tenant].StreamRetention = streamRetention -+ } -+ -+ return tenants -+} -+ -+func (m mockRetentionLimits) DefaultLimits() *validation.Limits { -+ return &validation.Limits{ -+ RetentionPeriod: model.Duration(m.defaultRetention), -+ StreamRetention: m.defaultStreamRetention, -+ } -+}",refactor,Apply retention in planner (#13484) -b247a5450da4be9d0abfa868694c5b4b6dec1c2f,2018-06-27 10:05:35,Tom Wilkie,"Command to fetch label names. - -Signed-off-by: Tom Wilkie ",False,"diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go -index 0c2023416af30..bfc6b43c743bb 100644 ---- a/cmd/logcli/main.go -+++ b/cmd/logcli/main.go -@@ -34,7 +34,7 @@ var ( - forward = queryCmd.Flag(""forward"", ""Scan forwards through logs."").Default(""false"").Bool() - - labelsCmd = app.Command(""labels"", ""Find values for a given label."") -- labelName = labelsCmd.Arg(""label"", ""The name of the label."").Required().String() -+ labelName = labelsCmd.Arg(""label"", ""The name of the label."").String() - ) - - func main() { -@@ -49,7 +49,12 @@ func main() { - } - - func label() { -- path := fmt.Sprintf(""/api/prom/label/%s/values"", *labelName) -+ var path string -+ if labelName != nil { -+ path = fmt.Sprintf(""/api/prom/label/%s/values"", url.PathEscape(*labelName)) -+ } else { -+ path = ""/api/prom/label"" -+ } - var labelResponse logproto.LabelResponse - doRequest(path, &labelResponse) - for _, value := range labelResponse.Values { -diff --git a/cmd/querier/main.go b/cmd/querier/main.go -index 4dfa5e32ebd88..4f91b1a3bc1ff 100644 ---- a/cmd/querier/main.go -+++ b/cmd/querier/main.go -@@ -68,6 +68,7 @@ func main() { - ) - - server.HTTP.Handle(""/api/prom/query"", httpMiddleware.Wrap(http.HandlerFunc(querier.QueryHandler))) -+ server.HTTP.Handle(""/api/prom/label"", httpMiddleware.Wrap(http.HandlerFunc(querier.LabelHandler))) - server.HTTP.Handle(""/api/prom/label/{name}/values"", httpMiddleware.Wrap(http.HandlerFunc(querier.LabelHandler))) - server.Run() - } -diff --git a/pkg/ingester/index.go b/pkg/ingester/index.go -index c3aad41be1630..cb41b4c179191 100644 ---- a/pkg/ingester/index.go -+++ b/pkg/ingester/index.go -@@ -71,6 +71,17 @@ func (i *invertedIndex) lookup(matchers []*labels.Matcher) []string { - return intersection - } - -+func (i *invertedIndex) 
labelNames() []string { -+ i.mtx.RLock() -+ defer i.mtx.RUnlock() -+ -+ res := make([]string, 0, len(i.idx)) -+ for name := range i.idx { -+ res = append(res, name) -+ } -+ return res -+} -+ - func (i *invertedIndex) lookupLabelValues(name string) []string { - i.mtx.RLock() - defer i.mtx.RUnlock() -diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -index 1d7233dfb6d58..5ae6bfd515b38 100644 ---- a/pkg/ingester/instance.go -+++ b/pkg/ingester/instance.go -@@ -94,7 +94,12 @@ func (i *instance) Query(req *logproto.QueryRequest, queryServer logproto.Querie - } - - func (i *instance) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { -- labels := i.index.lookupLabelValues(req.Name) -+ var labels []string -+ if req.Values { -+ labels = i.index.lookupLabelValues(req.Name) -+ } else { -+ labels = i.index.labelNames() -+ } - sort.Strings(labels) - return &logproto.LabelResponse{ - Values: labels, -diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go -index 9b0673551d56e..634662b06a291 100644 ---- a/pkg/logproto/logproto.pb.go -+++ b/pkg/logproto/logproto.pb.go -@@ -163,7 +163,8 @@ func (m *QueryResponse) GetStreams() []*Stream { - } - - type LabelRequest struct { -- Name string `protobuf:""bytes,1,opt,name=name,proto3"" json:""name,omitempty""` -+ Name string `protobuf:""bytes,1,opt,name=name,proto3"" json:""name,omitempty""` -+ Values bool `protobuf:""varint,2,opt,name=values,proto3"" json:""values,omitempty""` - } - - func (m *LabelRequest) Reset() { *m = LabelRequest{} } -@@ -177,6 +178,13 @@ func (m *LabelRequest) GetName() string { - return """" - } - -+func (m *LabelRequest) GetValues() bool { -+ if m != nil { -+ return m.Values -+ } -+ return false -+} -+ - type LabelResponse struct { - Values []string `protobuf:""bytes,1,rep,name=values"" json:""values,omitempty""` - } -@@ -396,6 +404,9 @@ func (this *LabelRequest) Equal(that interface{}) bool { - if this.Name != that1.Name { - return false - } -+ if this.Values != that1.Values { -+ return false -+ } - return true - } - func (this *LabelResponse) Equal(that interface{}) bool { -@@ -538,9 +549,10 @@ func (this *LabelRequest) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 5) -+ s := make([]string, 0, 6) - s = append(s, ""&logproto.LabelRequest{"") - s = append(s, ""Name: ""+fmt.Sprintf(""%#v"", this.Name)+"",\n"") -+ s = append(s, ""Values: ""+fmt.Sprintf(""%#v"", this.Values)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } -@@ -976,6 +988,16 @@ func (m *LabelRequest) MarshalTo(dAtA []byte) (int, error) { - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } -+ if m.Values { -+ dAtA[i] = 0x10 -+ i++ -+ if m.Values { -+ dAtA[i] = 1 -+ } else { -+ dAtA[i] = 0 -+ } -+ i++ -+ } - return i, nil - } - -@@ -1150,6 +1172,9 @@ func (m *LabelRequest) Size() (n int) { - if l > 0 { - n += 1 + l + sovLogproto(uint64(l)) - } -+ if m.Values { -+ n += 2 -+ } - return n - } - -@@ -1256,6 +1281,7 @@ func (this *LabelRequest) String() string { - } - s := strings.Join([]string{`&LabelRequest{`, - `Name:` + fmt.Sprintf(""%v"", this.Name) + `,`, -+ `Values:` + fmt.Sprintf(""%v"", this.Values) + `,`, - `}`, - }, """") - return s -@@ -1776,6 +1802,26 @@ func (m *LabelRequest) Unmarshal(dAtA []byte) error { - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex -+ case 2: -+ if wireType != 0 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field Values"", wireType) -+ } -+ var v int -+ 
for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowLogproto -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ v |= (int(b) & 0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ m.Values = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipLogproto(dAtA[iNdEx:]) -@@ -2203,44 +2249,44 @@ var ( - func init() { proto.RegisterFile(""logproto.proto"", fileDescriptorLogproto) } - - var fileDescriptorLogproto = []byte{ -- // 612 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0x6f, 0xd3, 0x40, -- 0x10, 0xf5, 0xb6, 0xf9, 0x68, 0x26, 0x69, 0xa9, 0x16, 0x28, 0x56, 0x0e, 0x6e, 0x64, 0x24, 0x88, -- 0x2a, 0xb0, 0x69, 0x90, 0x40, 0x05, 0x2e, 0x4d, 0x0b, 0x42, 0x02, 0x89, 0xd6, 0x20, 0x71, 0xac, -- 0x9c, 0x74, 0x71, 0x2c, 0x6c, 0x6f, 0xba, 0x5e, 0x57, 0xf4, 0x86, 0xc4, 0x1f, 0xa8, 0xc4, 0x9f, -- 0xe0, 0xa7, 0xf4, 0xd8, 0x23, 0x27, 0xa0, 0xe6, 0xc2, 0xb1, 0x37, 0xae, 0x68, 0x3f, 0x1c, 0x1b, -- 0x90, 0x10, 0xbd, 0x58, 0xf3, 0x66, 0xdf, 0x7b, 0x9e, 0x99, 0x9d, 0x85, 0xa5, 0x88, 0x06, 0x53, -- 0x46, 0x39, 0x75, 0xe4, 0x17, 0x2f, 0x14, 0xb8, 0xbb, 0x1a, 0x50, 0x1a, 0x44, 0xc4, 0x95, 0x68, -- 0x94, 0xbd, 0x71, 0x79, 0x18, 0x93, 0x94, 0xfb, 0xf1, 0x54, 0x51, 0xbb, 0xb7, 0x83, 0x90, 0x4f, -- 0xb2, 0x91, 0x33, 0xa6, 0xb1, 0x1b, 0xd0, 0x80, 0x96, 0x4c, 0x81, 0x24, 0x90, 0x91, 0xa6, 0x6f, -- 0x28, 0x3f, 0x27, 0xa0, 0x91, 0x9f, 0x04, 0x0e, 0x65, 0x81, 0x1b, 0xb0, 0xe9, 0xd8, 0x9d, 0x10, -- 0x3f, 0xe2, 0x13, 0x19, 0xef, 0xa9, 0x78, 0xef, 0x70, 0x5d, 0x67, 0x95, 0xd4, 0xde, 0x80, 0xf6, -- 0x4e, 0x96, 0x4e, 0x3c, 0x72, 0x90, 0x91, 0x94, 0xe3, 0x35, 0x68, 0xa6, 0x9c, 0x11, 0x3f, 0x4e, -- 0x4d, 0xd4, 0x9b, 0xef, 0xb7, 0x07, 0xcb, 0xce, 0xac, 0x8b, 0x97, 0xf2, 0xc0, 0x2b, 0x08, 0xf6, -- 0x12, 0x74, 0x94, 0x34, 0x9d, 0xd2, 0x24, 0x25, 0xf6, 0x4f, 0x04, 0x9d, 0xdd, 0x8c, 0xb0, 0xa3, -- 0xc2, 0xec, 0x0a, 0xd4, 0x0f, 0x04, 0x36, 0x51, 0x0f, 0xf5, 0x5b, 0x9e, 0x02, 0x22, 0x1b, 0x85, -- 0x71, 0xc8, 0xcd, 0xb9, 0x1e, 0xea, 0x2f, 0x7a, 0x0a, 0xe0, 0x07, 0x50, 0x4f, 0xb9, 0xcf, 0xb8, -- 0x39, 0xdf, 0x43, 0xfd, 0xf6, 0xa0, 0xeb, 0xe8, 0x96, 0x8a, 0xc6, 0x9d, 0x57, 0xc5, 0x88, 0x86, -- 0x0b, 0x27, 0x5f, 0x56, 0x8d, 0xe3, 0xaf, 0xab, 0xc8, 0x53, 0x12, 0x7c, 0x0f, 0xe6, 0x49, 0xb2, -- 0x6f, 0xd6, 0x2e, 0xa0, 0x14, 0x02, 0xbc, 0x0e, 0xad, 0xfd, 0x90, 0x91, 0x31, 0x0f, 0x69, 0x62, -- 0xd6, 0x7b, 0xa8, 0xbf, 0x34, 0xb8, 0x5c, 0xb6, 0xbb, 0x5d, 0x1c, 0x79, 0x25, 0x4b, 0x14, 0xcf, -- 0x48, 0x40, 0xde, 0x99, 0x0d, 0xd5, 0x92, 0x04, 0xf6, 0x43, 0x58, 0xd4, 0x8d, 0xab, 0x51, 0x5c, -- 0x68, 0x8c, 0x36, 0x74, 0x9e, 0xfb, 0x23, 0x12, 0x15, 0x53, 0xc3, 0x50, 0x4b, 0xfc, 0x98, 0xe8, -- 0xa1, 0xc9, 0xd8, 0xbe, 0x09, 0x8b, 0x9a, 0xa3, 0x7f, 0xb0, 0x02, 0x8d, 0x43, 0x3f, 0xca, 0x88, -- 0xf2, 0x6f, 0x79, 0x1a, 0xd9, 0xbb, 0xd0, 0x50, 0xfe, 0x82, 0x11, 0x09, 0x49, 0xaa, 0x8d, 0x34, -- 0xc2, 0x2e, 0x34, 0x49, 0xc2, 0x59, 0x48, 0x52, 0x73, 0x4e, 0x96, 0x76, 0xa9, 0x2c, 0xed, 0x71, -- 0xc2, 0xd9, 0xd1, 0xb0, 0x26, 0xa6, 0xe4, 0x15, 0x2c, 0x7b, 0x0f, 0xea, 0x32, 0x8f, 0x87, 0xd0, -- 0x9a, 0xed, 0xa9, 0x34, 0xfd, 0xdf, 0x61, 0x97, 0x32, 0xd1, 0x5c, 0x14, 0x26, 0x44, 0xde, 0x7d, -- 0xcb, 0x93, 0xf1, 0xda, 0x0d, 0x68, 0xcd, 0x66, 0x8d, 0xdb, 0xd0, 0x7c, 0xf2, 0xc2, 0x7b, 0xbd, -- 0xe9, 0x6d, 0x2f, 0x1b, 0xb8, 0x03, 0x0b, 0xc3, 0xcd, 0xad, 0x67, 0x12, 0xa1, 0xc1, 0x47, 0x04, -- 0x0d, 0xb1, 0x70, 0x84, 0xe1, 0xfb, 0x50, 0x13, 0x11, 0xbe, 0x5a, 0xd6, 0x5e, 0xd9, 0xe2, 0xee, -- 0xca, 0x9f, 0x69, 0xbd, 0xa1, 
0x06, 0xde, 0x81, 0xfa, 0xd6, 0x84, 0x8c, 0xdf, 0x62, 0xdb, 0x11, -- 0xaf, 0xc2, 0xd1, 0x6f, 0xe1, 0x70, 0xdd, 0x79, 0x2a, 0x23, 0x79, 0x58, 0xd8, 0x5c, 0xff, 0x27, -- 0x47, 0x79, 0x0e, 0x3e, 0x20, 0x68, 0x8a, 0xcb, 0x0f, 0x09, 0xc3, 0x8f, 0xa0, 0x2e, 0xf7, 0x00, -- 0x57, 0x0a, 0xa8, 0xbe, 0x88, 0xee, 0xb5, 0xbf, 0xf2, 0x45, 0x65, 0x77, 0x90, 0x78, 0x02, 0xf2, -- 0x92, 0xab, 0xea, 0xea, 0x66, 0x54, 0xd5, 0xbf, 0x6d, 0x83, 0x6d, 0x0c, 0x6f, 0x9d, 0x9e, 0x59, -- 0xc6, 0xe7, 0x33, 0xcb, 0x38, 0x3f, 0xb3, 0xd0, 0xfb, 0xdc, 0x42, 0x9f, 0x72, 0x0b, 0x9d, 0xe4, -- 0x16, 0x3a, 0xcd, 0x2d, 0xf4, 0x2d, 0xb7, 0xd0, 0x8f, 0xdc, 0x32, 0xce, 0x73, 0x0b, 0x1d, 0x7f, -- 0xb7, 0x8c, 0x51, 0x43, 0x9a, 0xdc, 0xfd, 0x15, 0x00, 0x00, 0xff, 0xff, 0x86, 0x37, 0x4d, 0x36, -- 0xa2, 0x04, 0x00, 0x00, -+ // 617 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6a, 0xd4, 0x50, -+ 0x14, 0xce, 0xb5, 0xf3, 0x7b, 0x66, 0x5a, 0xcb, 0x55, 0x6b, 0x98, 0x45, 0x3a, 0x44, 0xd0, 0xa1, -+ 0x68, 0xc6, 0x8e, 0xa0, 0xb4, 0xba, 0xe9, 0xb4, 0x8a, 0xa0, 0x60, 0x1b, 0x05, 0x97, 0x25, 0x33, -+ 0xbd, 0x66, 0x82, 0x49, 0xee, 0xf4, 0xe6, 0xa6, 0xd8, 0x9d, 0xe0, 0x0b, 0x14, 0x7c, 0x09, 0x1f, -+ 0xa5, 0xcb, 0x2e, 0x5d, 0xa9, 0x13, 0x37, 0x2e, 0xbb, 0x73, 0x2b, 0xf7, 0x27, 0x93, 0xa8, 0x20, -+ 0x76, 0x13, 0xce, 0x77, 0xf2, 0x7d, 0xe7, 0xef, 0x9e, 0x03, 0x4b, 0x21, 0xf5, 0xa7, 0x8c, 0x72, -+ 0xea, 0xc8, 0x2f, 0x6e, 0xe4, 0xb8, 0xb3, 0xea, 0x53, 0xea, 0x87, 0xa4, 0x2f, 0xd1, 0x28, 0x7d, -+ 0xd3, 0xe7, 0x41, 0x44, 0x12, 0xee, 0x45, 0x53, 0x45, 0xed, 0xdc, 0xf1, 0x03, 0x3e, 0x49, 0x47, -+ 0xce, 0x98, 0x46, 0x7d, 0x9f, 0xfa, 0xb4, 0x60, 0x0a, 0x24, 0x81, 0xb4, 0x34, 0x7d, 0x43, 0xc5, -+ 0x73, 0x7c, 0x1a, 0x7a, 0xb1, 0xef, 0x50, 0xe6, 0xf7, 0x7d, 0x36, 0x1d, 0xf7, 0x27, 0xc4, 0x0b, -+ 0xf9, 0x44, 0xda, 0xfb, 0xca, 0xde, 0x3f, 0x5a, 0xd7, 0x5e, 0x25, 0xb5, 0x37, 0xa0, 0xb5, 0x9b, -+ 0x26, 0x13, 0x97, 0x1c, 0xa6, 0x24, 0xe1, 0x78, 0x0d, 0xea, 0x09, 0x67, 0xc4, 0x8b, 0x12, 0x13, -+ 0x75, 0x17, 0x7a, 0xad, 0xc1, 0xb2, 0x33, 0xef, 0xe2, 0xa5, 0xfc, 0xe1, 0xe6, 0x04, 0x7b, 0x09, -+ 0xda, 0x4a, 0x9a, 0x4c, 0x69, 0x9c, 0x10, 0xfb, 0x27, 0x82, 0xf6, 0x5e, 0x4a, 0xd8, 0x71, 0x1e, -+ 0xec, 0x2a, 0x54, 0x0f, 0x05, 0x36, 0x51, 0x17, 0xf5, 0x9a, 0xae, 0x02, 0xc2, 0x1b, 0x06, 0x51, -+ 0xc0, 0xcd, 0x4b, 0x5d, 0xd4, 0x5b, 0x74, 0x15, 0xc0, 0x9b, 0x50, 0x4d, 0xb8, 0xc7, 0xb8, 0xb9, -+ 0xd0, 0x45, 0xbd, 0xd6, 0xa0, 0xe3, 0xe8, 0x96, 0xf2, 0xc6, 0x9d, 0x57, 0xf9, 0x88, 0x86, 0x8d, -+ 0xd3, 0x2f, 0xab, 0xc6, 0xc9, 0xd7, 0x55, 0xe4, 0x2a, 0x09, 0xbe, 0x0f, 0x0b, 0x24, 0x3e, 0x30, -+ 0x2b, 0x17, 0x50, 0x0a, 0x01, 0x5e, 0x87, 0xe6, 0x41, 0xc0, 0xc8, 0x98, 0x07, 0x34, 0x36, 0xab, -+ 0x5d, 0xd4, 0x5b, 0x1a, 0x5c, 0x29, 0xda, 0xdd, 0xc9, 0x7f, 0xb9, 0x05, 0x4b, 0x14, 0xcf, 0x88, -+ 0x4f, 0xde, 0x99, 0x35, 0xd5, 0x92, 0x04, 0xf6, 0x43, 0x58, 0xd4, 0x8d, 0xab, 0x51, 0x5c, 0x68, -+ 0x8c, 0x9b, 0xd0, 0x7e, 0xee, 0x8d, 0x48, 0x98, 0x4f, 0x0d, 0x43, 0x25, 0xf6, 0x22, 0xa2, 0x87, -+ 0x26, 0x6d, 0xbc, 0x02, 0xb5, 0x23, 0x2f, 0x4c, 0x49, 0x22, 0x87, 0xd6, 0x70, 0x35, 0xb2, 0x6f, -+ 0xc1, 0xa2, 0xd6, 0xea, 0xc4, 0x05, 0x51, 0xe4, 0x6d, 0xce, 0x89, 0x7b, 0x50, 0x53, 0x79, 0x05, -+ 0x23, 0x14, 0x92, 0x44, 0x27, 0xd0, 0x08, 0xf7, 0xa1, 0x4e, 0x62, 0xce, 0x02, 0x99, 0x43, 0x94, -+ 0x7c, 0xb9, 0x28, 0xf9, 0x71, 0xcc, 0xd9, 0xf1, 0xb0, 0x22, 0xa6, 0xe7, 0xe6, 0x2c, 0x7b, 0x1f, -+ 0xaa, 0xd2, 0x8f, 0x87, 0xd0, 0x9c, 0xef, 0xaf, 0x0c, 0xfa, 0xbf, 0x8f, 0x50, 0xc8, 0x44, 0xd3, -+ 0x61, 0x10, 0x13, 0xd9, 0x5e, 0xd3, 0x95, 
0xf6, 0xda, 0x4d, 0x68, 0xce, 0xdf, 0x00, 0xb7, 0xa0, -+ 0xfe, 0xe4, 0x85, 0xfb, 0x7a, 0xcb, 0xdd, 0x59, 0x36, 0x70, 0x1b, 0x1a, 0xc3, 0xad, 0xed, 0x67, -+ 0x12, 0xa1, 0xc1, 0x47, 0x04, 0x35, 0xb1, 0x88, 0x84, 0xe1, 0x07, 0x50, 0x11, 0x16, 0xbe, 0x56, -+ 0xd4, 0x5e, 0xda, 0xee, 0xce, 0xca, 0x9f, 0x6e, 0xbd, 0xb9, 0x06, 0xde, 0x85, 0xea, 0xf6, 0x84, -+ 0x8c, 0xdf, 0x62, 0xdb, 0x11, 0xd7, 0xe2, 0xe8, 0x1b, 0x39, 0x5a, 0x77, 0x9e, 0x4a, 0x4b, 0xfe, -+ 0xcc, 0xc3, 0xdc, 0xf8, 0x27, 0x47, 0xc5, 0x1c, 0x7c, 0x40, 0x50, 0x17, 0x4b, 0x11, 0x10, 0x86, -+ 0x1f, 0x41, 0x55, 0xee, 0x07, 0x2e, 0x15, 0x50, 0xbe, 0x94, 0xce, 0xf5, 0xbf, 0xfc, 0x79, 0x65, -+ 0x77, 0x91, 0x38, 0x0d, 0xf9, 0xc8, 0x65, 0x75, 0x79, 0x63, 0xca, 0xea, 0xdf, 0xb6, 0xc1, 0x36, -+ 0x86, 0xb7, 0xcf, 0x66, 0x96, 0xf1, 0x79, 0x66, 0x19, 0xe7, 0x33, 0x0b, 0xbd, 0xcf, 0x2c, 0xf4, -+ 0x29, 0xb3, 0xd0, 0x69, 0x66, 0xa1, 0xb3, 0xcc, 0x42, 0xdf, 0x32, 0x0b, 0xfd, 0xc8, 0x2c, 0xe3, -+ 0x3c, 0xb3, 0xd0, 0xc9, 0x77, 0xcb, 0x18, 0xd5, 0x64, 0x90, 0x7b, 0xbf, 0x02, 0x00, 0x00, 0xff, -+ 0xff, 0x04, 0xc7, 0xb1, 0xfd, 0xba, 0x04, 0x00, 0x00, - } -diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto -index 8333631269a25..2fa6b159937b1 100644 ---- a/pkg/logproto/logproto.proto -+++ b/pkg/logproto/logproto.proto -@@ -43,6 +43,7 @@ message QueryResponse { - - message LabelRequest { - string name = 1; -+ bool values = 2; // True to fetch label values, false for fetch labels names. - } - - message LabelResponse { -diff --git a/pkg/querier/http.go b/pkg/querier/http.go -index 8016afc455bb3..9992d53db9e18 100644 ---- a/pkg/querier/http.go -+++ b/pkg/querier/http.go -@@ -106,8 +106,10 @@ func (q *Querier) QueryHandler(w http.ResponseWriter, r *http.Request) { - } - - func (q *Querier) LabelHandler(w http.ResponseWriter, r *http.Request) { -+ name, ok := mux.Vars(r)[""name""] - req := &logproto.LabelRequest{ -- Name: mux.Vars(r)[""name""], -+ Values: !ok, -+ Name: name, - } - resp, err := q.Label(r.Context(), req) - if err != nil {",unknown,"Command to fetch label names. - -Signed-off-by: Tom Wilkie " -4bfcd09d6b82cbebdb3f2e419b4e6bcbb5627320,2019-06-24 23:39:46,Edward Welch,adding resource requests in jsonnet,False,"diff --git a/production/ksonnet/loki-canary/loki-canary.libsonnet b/production/ksonnet/loki-canary/loki-canary.libsonnet -index e940a417c157b..a07c920f6d2ac 100644 ---- a/production/ksonnet/loki-canary/loki-canary.libsonnet -+++ b/production/ksonnet/loki-canary/loki-canary.libsonnet -@@ -12,6 +12,7 @@ k + config { - - loki_canary_container:: - container.new('loki-canary', $._images.loki_canary) + -+ $.util.resourcesRequests('10m', '20Mi') + - container.withPorts($.core.v1.containerPort.new('http-metrics', 80)) + - container.withArgsMixin($.util.mapToFlags($.loki_canary_args)) + - container.withEnv([ -@@ -23,4 +24,4 @@ k + config { - - loki_canary_daemonset: - daemonSet.new('loki-canary', [$.loki_canary_container]), --} -\ No newline at end of file -+}",unknown,adding resource requests in jsonnet -929c8e4923258679e934991eec344629cafa36a4,2021-11-03 18:45:31,Christian Haudum,"Allow HTTP POST requests on ring pages (#4630) - -The ring pages offer the ability to ""forget"" instances of the ring and -this is implemented by a HTTP `form` element, which does a `POST` -request on itself. 
- -Signed-off-by: Christian Haudum ",False,"diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index b00ac1b4c1c7d..e3b37cfd759c1 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -133,7 +133,7 @@ func (t *Loki) initRing() (_ services.Service, err error) { - return - } - prometheus.MustRegister(t.ring) -- t.Server.HTTP.Path(""/ring"").Methods(""GET"").Handler(t.ring) -+ t.Server.HTTP.Path(""/ring"").Methods(""GET"", ""POST"").Handler(t.ring) - return t.ring, nil - } - -@@ -604,7 +604,7 @@ func (t *Loki) initRuler() (_ services.Service, err error) { - // Expose HTTP endpoints. - if t.Cfg.Ruler.EnableAPI { - -- t.Server.HTTP.Path(""/ruler/ring"").Methods(""GET"").Handler(t.ruler) -+ t.Server.HTTP.Path(""/ruler/ring"").Methods(""GET"", ""POST"").Handler(t.ruler) - cortex_ruler.RegisterRulerServer(t.Server.GRPC, t.ruler) - - // Prometheus Rule API Routes -@@ -670,7 +670,7 @@ func (t *Loki) initCompactor() (services.Service, error) { - return nil, err - } - -- t.Server.HTTP.Path(""/compactor/ring"").Methods(""GET"").Handler(t.compactor) -+ t.Server.HTTP.Path(""/compactor/ring"").Methods(""GET"", ""POST"").Handler(t.compactor) - if t.Cfg.CompactorConfig.RetentionEnabled { - t.Server.HTTP.Path(""/loki/api/admin/delete"").Methods(""PUT"", ""POST"").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.AddDeleteRequestHandler))) - t.Server.HTTP.Path(""/loki/api/admin/delete"").Methods(""GET"").Handler(t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.compactor.DeleteRequestsHandler.GetAllDeleteRequestsHandler))) -@@ -709,7 +709,7 @@ func (t *Loki) initQueryScheduler() (services.Service, error) { - - schedulerpb.RegisterSchedulerForFrontendServer(t.Server.GRPC, s) - schedulerpb.RegisterSchedulerForQuerierServer(t.Server.GRPC, s) -- t.Server.HTTP.Path(""/scheduler/ring"").Methods(""GET"").Handler(s) -+ t.Server.HTTP.Path(""/scheduler/ring"").Methods(""GET"", ""POST"").Handler(s) - t.queryScheduler = s - return s, nil - }",unknown,"Allow HTTP POST requests on ring pages (#4630) - -The ring pages offer the ability to ""forget"" instances of the ring and -this is implemented by a HTTP `form` element, which does a `POST` -request on itself. 
- -Signed-off-by: Christian Haudum " -a1f3478581f4ff50479d29eab18d7255991946b0,2025-02-26 23:54:19,renovate[bot],"chore(deps): update dependency vite to v6 (main) (#16476) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json -index 2411e845d0aaa..ce5562167dfcb 100644 ---- a/pkg/ui/frontend/package-lock.json -+++ b/pkg/ui/frontend/package-lock.json -@@ -67,7 +67,7 @@ - ""postcss"": ""^8.5.1"", - ""tailwindcss"": ""^3.4.1"", - ""typescript"": ""^5.0.2"", -- ""vite"": ""^5.1.0"" -+ ""vite"": ""^6.0.0"" - } - }, - ""node_modules/@alloc/quick-lru"": { -@@ -401,9 +401,9 @@ - } - }, - ""node_modules/@esbuild/aix-ppc64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz"", -- ""integrity"": ""sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.0.tgz"", -+ ""integrity"": ""sha512-O7vun9Sf8DFjH2UtqK8Ku3LkquL9SZL8OLY1T5NZkA34+wG3OQF7cl4Ql8vdNzM6fzBbYfLaiRLIOZ+2FOCgBQ=="", - ""cpu"": [ - ""ppc64"" - ], -@@ -414,13 +414,13 @@ - ""aix"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/android-arm"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz"", -- ""integrity"": ""sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.0.tgz"", -+ ""integrity"": ""sha512-PTyWCYYiU0+1eJKmw21lWtC+d08JDZPQ5g+kFyxP0V+es6VPPSUhM6zk8iImp2jbV6GwjX4pap0JFbUQN65X1g=="", - ""cpu"": [ - ""arm"" - ], -@@ -431,13 +431,13 @@ - ""android"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/android-arm64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz"", -- ""integrity"": ""sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.0.tgz"", -+ ""integrity"": ""sha512-grvv8WncGjDSyUBjN9yHXNt+cq0snxXbDxy5pJtzMKGmmpPxeAmAhWxXI+01lU5rwZomDgD3kJwulEnhTRUd6g=="", - ""cpu"": [ - ""arm64"" - ], -@@ -448,13 +448,13 @@ - ""android"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/android-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-m/ix7SfKG5buCnxasr52+LI78SQ+wgdENi9CqyCXwjVR2X4Jkz+BpC3le3AoBPYTC9NHklwngVXvbJ9/Akhrfg=="", - ""cpu"": [ - ""x64"" - ], -@@ -465,13 +465,13 @@ - ""android"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/darwin-arm64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz"", -- 
""integrity"": ""sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.0.tgz"", -+ ""integrity"": ""sha512-mVwdUb5SRkPayVadIOI78K7aAnPamoeFR2bT5nszFUZ9P8UpK4ratOdYbZZXYSqPKMHfS1wdHCJk1P1EZpRdvw=="", - ""cpu"": [ - ""arm64"" - ], -@@ -482,13 +482,13 @@ - ""darwin"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/darwin-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-DgDaYsPWFTS4S3nWpFcMn/33ZZwAAeAFKNHNa1QN0rI4pUjgqf0f7ONmXf6d22tqTY+H9FNdgeaAa+YIFUn2Rg=="", - ""cpu"": [ - ""x64"" - ], -@@ -499,13 +499,13 @@ - ""darwin"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/freebsd-arm64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz"", -- ""integrity"": ""sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.0.tgz"", -+ ""integrity"": ""sha512-VN4ocxy6dxefN1MepBx/iD1dH5K8qNtNe227I0mnTRjry8tj5MRk4zprLEdG8WPyAPb93/e4pSgi1SoHdgOa4w=="", - ""cpu"": [ - ""arm64"" - ], -@@ -516,13 +516,13 @@ - ""freebsd"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/freebsd-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-mrSgt7lCh07FY+hDD1TxiTyIHyttn6vnjesnPoVDNmDfOmggTLXRv8Id5fNZey1gl/V2dyVK1VXXqVsQIiAk+A=="", - ""cpu"": [ - ""x64"" - ], -@@ -533,13 +533,13 @@ - ""freebsd"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-arm"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz"", -- ""integrity"": ""sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.0.tgz"", -+ ""integrity"": ""sha512-vkB3IYj2IDo3g9xX7HqhPYxVkNQe8qTK55fraQyTzTX/fxaDtXiEnavv9geOsonh2Fd2RMB+i5cbhu2zMNWJwg=="", - ""cpu"": [ - ""arm"" - ], -@@ -550,13 +550,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-arm64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz"", -- ""integrity"": ""sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": 
""https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.0.tgz"", -+ ""integrity"": ""sha512-9QAQjTWNDM/Vk2bgBl17yWuZxZNQIF0OUUuPZRKoDtqF2k4EtYbpyiG5/Dk7nqeK6kIJWPYldkOcBqjXjrUlmg=="", - ""cpu"": [ - ""arm64"" - ], -@@ -567,13 +567,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-ia32"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz"", -- ""integrity"": ""sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.0.tgz"", -+ ""integrity"": ""sha512-43ET5bHbphBegyeqLb7I1eYn2P/JYGNmzzdidq/w0T8E2SsYL1U6un2NFROFRg1JZLTzdCoRomg8Rvf9M6W6Gg=="", - ""cpu"": [ - ""ia32"" - ], -@@ -584,13 +584,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-loong64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz"", -- ""integrity"": ""sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.0.tgz"", -+ ""integrity"": ""sha512-fC95c/xyNFueMhClxJmeRIj2yrSMdDfmqJnyOY4ZqsALkDrrKJfIg5NTMSzVBr5YW1jf+l7/cndBfP3MSDpoHw=="", - ""cpu"": [ - ""loong64"" - ], -@@ -601,13 +601,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-mips64el"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz"", -- ""integrity"": ""sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.0.tgz"", -+ ""integrity"": ""sha512-nkAMFju7KDW73T1DdH7glcyIptm95a7Le8irTQNO/qtkoyypZAnjchQgooFUDQhNAy4iu08N79W4T4pMBwhPwQ=="", - ""cpu"": [ - ""mips64el"" - ], -@@ -618,13 +618,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-ppc64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz"", -- ""integrity"": ""sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.0.tgz"", -+ ""integrity"": ""sha512-NhyOejdhRGS8Iwv+KKR2zTq2PpysF9XqY+Zk77vQHqNbo/PwZCzB5/h7VGuREZm1fixhs4Q/qWRSi5zmAiO4Fw=="", - ""cpu"": [ - ""ppc64"" - ], -@@ -635,13 +635,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-riscv64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz"", -- ""integrity"": ""sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.0.tgz"", -+ ""integrity"": 
""sha512-5S/rbP5OY+GHLC5qXp1y/Mx//e92L1YDqkiBbO9TQOvuFXM+iDqUNG5XopAnXoRH3FjIUDkeGcY1cgNvnXp/kA=="", - ""cpu"": [ - ""riscv64"" - ], -@@ -652,13 +652,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-s390x"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz"", -- ""integrity"": ""sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.0.tgz"", -+ ""integrity"": ""sha512-XM2BFsEBz0Fw37V0zU4CXfcfuACMrppsMFKdYY2WuTS3yi8O1nFOhil/xhKTmE1nPmVyvQJjJivgDT+xh8pXJA=="", - ""cpu"": [ - ""s390x"" - ], -@@ -669,13 +669,13 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/linux-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-9yl91rHw/cpwMCNytUDxwj2XjFpxML0y9HAOH9pNVQDpQrBxHy01Dx+vaMu0N1CKa/RzBD2hB4u//nfc+Sd3Cw=="", - ""cpu"": [ - ""x64"" - ], -@@ -686,13 +686,30 @@ - ""linux"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" -+ } -+ }, -+ ""node_modules/@esbuild/netbsd-arm64"": { -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.0.tgz"", -+ ""integrity"": ""sha512-RuG4PSMPFfrkH6UwCAqBzauBWTygTvb1nxWasEJooGSJ/NwRw7b2HOwyRTQIU97Hq37l3npXoZGYMy3b3xYvPw=="", -+ ""cpu"": [ -+ ""arm64"" -+ ], -+ ""dev"": true, -+ ""license"": ""MIT"", -+ ""optional"": true, -+ ""os"": [ -+ ""netbsd"" -+ ], -+ ""engines"": { -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/netbsd-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-jl+qisSB5jk01N5f7sPCsBENCOlPiS/xptD5yxOx2oqQfyourJwIKLRA2yqWdifj3owQZCL2sn6o08dBzZGQzA=="", - ""cpu"": [ - ""x64"" - ], -@@ -703,13 +720,30 @@ - ""netbsd"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" -+ } -+ }, -+ ""node_modules/@esbuild/openbsd-arm64"": { -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.0.tgz"", -+ ""integrity"": ""sha512-21sUNbq2r84YE+SJDfaQRvdgznTD8Xc0oc3p3iW/a1EVWeNj/SdUCbm5U0itZPQYRuRTW20fPMWMpcrciH2EJw=="", -+ ""cpu"": [ -+ ""arm64"" -+ ], -+ ""dev"": true, -+ ""license"": ""MIT"", -+ ""optional"": true, -+ ""os"": [ -+ ""openbsd"" -+ ], -+ ""engines"": { -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/openbsd-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": 
""https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-2gwwriSMPcCFRlPlKx3zLQhfN/2WjJ2NSlg5TKLQOJdV0mSxIcYNTMhk3H3ulL/cak+Xj0lY1Ym9ysDV1igceg=="", - ""cpu"": [ - ""x64"" - ], -@@ -720,13 +754,13 @@ - ""openbsd"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/sunos-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-bxI7ThgLzPrPz484/S9jLlvUAHYMzy6I0XiU1ZMeAEOBcS0VePBFxh1JjTQt3Xiat5b6Oh4x7UC7IwKQKIJRIg=="", - ""cpu"": [ - ""x64"" - ], -@@ -737,13 +771,13 @@ - ""sunos"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/win32-arm64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz"", -- ""integrity"": ""sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.0.tgz"", -+ ""integrity"": ""sha512-ZUAc2YK6JW89xTbXvftxdnYy3m4iHIkDtK3CLce8wg8M2L+YZhIvO1DKpxrd0Yr59AeNNkTiic9YLf6FTtXWMw=="", - ""cpu"": [ - ""arm64"" - ], -@@ -754,13 +788,13 @@ - ""win32"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/win32-ia32"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz"", -- ""integrity"": ""sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.0.tgz"", -+ ""integrity"": ""sha512-eSNxISBu8XweVEWG31/JzjkIGbGIJN/TrRoiSVZwZ6pkC6VX4Im/WV2cz559/TXLcYbcrDN8JtKgd9DJVIo8GA=="", - ""cpu"": [ - ""ia32"" - ], -@@ -771,13 +805,13 @@ - ""win32"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@esbuild/win32-x64"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz"", -- ""integrity"": ""sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.0.tgz"", -+ ""integrity"": ""sha512-ZENoHJBxA20C2zFzh6AI4fT6RraMzjYw4xKWemRTRmRVtN9c5DcH9r/f2ihEkMjOW5eGgrwCslG/+Y/3bL+DHQ=="", - ""cpu"": [ - ""x64"" - ], -@@ -788,7 +822,7 @@ - ""win32"" - ], - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - } - }, - ""node_modules/@eslint-community/eslint-utils"": { -@@ -3896,9 +3930,9 @@ - } - }, - ""node_modules/esbuild"": { -- ""version"": ""0.21.5"", -- ""resolved"": ""https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz"", -- ""integrity"": ""sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw=="", -+ ""version"": ""0.25.0"", -+ ""resolved"": ""https://registry.npmjs.org/esbuild/-/esbuild-0.25.0.tgz"", -+ ""integrity"": ""sha512-BXq5mqc8ltbaN34cDqWuYKyNhX8D/Z0J1xdtdQ8UcIIIyJyz+ZMKUt58tF3SrZ85jcfN/PZYhjR5uDQAYNVbuw=="", - ""dev"": true, 
- ""hasInstallScript"": true, - ""license"": ""MIT"", -@@ -3906,32 +3940,34 @@ - ""esbuild"": ""bin/esbuild"" - }, - ""engines"": { -- ""node"": "">=12"" -+ ""node"": "">=18"" - }, - ""optionalDependencies"": { -- ""@esbuild/aix-ppc64"": ""0.21.5"", -- ""@esbuild/android-arm"": ""0.21.5"", -- ""@esbuild/android-arm64"": ""0.21.5"", -- ""@esbuild/android-x64"": ""0.21.5"", -- ""@esbuild/darwin-arm64"": ""0.21.5"", -- ""@esbuild/darwin-x64"": ""0.21.5"", -- ""@esbuild/freebsd-arm64"": ""0.21.5"", -- ""@esbuild/freebsd-x64"": ""0.21.5"", -- ""@esbuild/linux-arm"": ""0.21.5"", -- ""@esbuild/linux-arm64"": ""0.21.5"", -- ""@esbuild/linux-ia32"": ""0.21.5"", -- ""@esbuild/linux-loong64"": ""0.21.5"", -- ""@esbuild/linux-mips64el"": ""0.21.5"", -- ""@esbuild/linux-ppc64"": ""0.21.5"", -- ""@esbuild/linux-riscv64"": ""0.21.5"", -- ""@esbuild/linux-s390x"": ""0.21.5"", -- ""@esbuild/linux-x64"": ""0.21.5"", -- ""@esbuild/netbsd-x64"": ""0.21.5"", -- ""@esbuild/openbsd-x64"": ""0.21.5"", -- ""@esbuild/sunos-x64"": ""0.21.5"", -- ""@esbuild/win32-arm64"": ""0.21.5"", -- ""@esbuild/win32-ia32"": ""0.21.5"", -- ""@esbuild/win32-x64"": ""0.21.5"" -+ ""@esbuild/aix-ppc64"": ""0.25.0"", -+ ""@esbuild/android-arm"": ""0.25.0"", -+ ""@esbuild/android-arm64"": ""0.25.0"", -+ ""@esbuild/android-x64"": ""0.25.0"", -+ ""@esbuild/darwin-arm64"": ""0.25.0"", -+ ""@esbuild/darwin-x64"": ""0.25.0"", -+ ""@esbuild/freebsd-arm64"": ""0.25.0"", -+ ""@esbuild/freebsd-x64"": ""0.25.0"", -+ ""@esbuild/linux-arm"": ""0.25.0"", -+ ""@esbuild/linux-arm64"": ""0.25.0"", -+ ""@esbuild/linux-ia32"": ""0.25.0"", -+ ""@esbuild/linux-loong64"": ""0.25.0"", -+ ""@esbuild/linux-mips64el"": ""0.25.0"", -+ ""@esbuild/linux-ppc64"": ""0.25.0"", -+ ""@esbuild/linux-riscv64"": ""0.25.0"", -+ ""@esbuild/linux-s390x"": ""0.25.0"", -+ ""@esbuild/linux-x64"": ""0.25.0"", -+ ""@esbuild/netbsd-arm64"": ""0.25.0"", -+ ""@esbuild/netbsd-x64"": ""0.25.0"", -+ ""@esbuild/openbsd-arm64"": ""0.25.0"", -+ ""@esbuild/openbsd-x64"": ""0.25.0"", -+ ""@esbuild/sunos-x64"": ""0.25.0"", -+ ""@esbuild/win32-arm64"": ""0.25.0"", -+ ""@esbuild/win32-ia32"": ""0.25.0"", -+ ""@esbuild/win32-x64"": ""0.25.0"" - } - }, - ""node_modules/escalade"": { -@@ -5389,9 +5425,9 @@ - } - }, - ""node_modules/postcss"": { -- ""version"": ""8.5.1"", -- ""resolved"": ""https://registry.npmjs.org/postcss/-/postcss-8.5.1.tgz"", -- ""integrity"": ""sha512-6oz2beyjc5VMn/KV1pPw8fliQkhBXrVn1Z3TVyqZxU8kZpzEKhBdmCFqI6ZbmGtamQvQGuU1sgPTk8ZrXDD7jQ=="", -+ ""version"": ""8.5.3"", -+ ""resolved"": ""https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz"", -+ ""integrity"": ""sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A=="", - ""funding"": [ - { - ""type"": ""opencollective"", -@@ -6663,21 +6699,21 @@ - } - }, - ""node_modules/vite"": { -- ""version"": ""5.4.14"", -- ""resolved"": ""https://registry.npmjs.org/vite/-/vite-5.4.14.tgz"", -- ""integrity"": ""sha512-EK5cY7Q1D8JNhSaPKVK4pwBFvaTmZxEnoKXLG/U9gmdDcihQGNzFlgIvaxezFR4glP1LsuiedwMBqCXH3wZccA=="", -+ ""version"": ""6.2.0"", -+ ""resolved"": ""https://registry.npmjs.org/vite/-/vite-6.2.0.tgz"", -+ ""integrity"": ""sha512-7dPxoo+WsT/64rDcwoOjk76XHj+TqNTIvHKcuMQ1k4/SeHDaQt5GFAeLYzrimZrMpn/O6DtdI03WUjdxuPM0oQ=="", - ""dev"": true, - ""license"": ""MIT"", - ""dependencies"": { -- ""esbuild"": ""^0.21.3"", -- ""postcss"": ""^8.4.43"", -- ""rollup"": ""^4.20.0"" -+ ""esbuild"": ""^0.25.0"", -+ ""postcss"": ""^8.5.3"", -+ ""rollup"": ""^4.30.1"" - }, - ""bin"": { - ""vite"": ""bin/vite.js"" - 
}, - ""engines"": { -- ""node"": ""^18.0.0 || >=20.0.0"" -+ ""node"": ""^18.0.0 || ^20.0.0 || >=22.0.0"" - }, - ""funding"": { - ""url"": ""https://github.com/vitejs/vite?sponsor=1"" -@@ -6686,19 +6722,25 @@ - ""fsevents"": ""~2.3.3"" - }, - ""peerDependencies"": { -- ""@types/node"": ""^18.0.0 || >=20.0.0"", -+ ""@types/node"": ""^18.0.0 || ^20.0.0 || >=22.0.0"", -+ ""jiti"": "">=1.21.0"", - ""less"": ""*"", - ""lightningcss"": ""^1.21.0"", - ""sass"": ""*"", - ""sass-embedded"": ""*"", - ""stylus"": ""*"", - ""sugarss"": ""*"", -- ""terser"": ""^5.4.0"" -+ ""terser"": ""^5.16.0"", -+ ""tsx"": ""^4.8.1"", -+ ""yaml"": ""^2.4.2"" - }, - ""peerDependenciesMeta"": { - ""@types/node"": { - ""optional"": true - }, -+ ""jiti"": { -+ ""optional"": true -+ }, - ""less"": { - ""optional"": true - }, -@@ -6719,6 +6761,12 @@ - }, - ""terser"": { - ""optional"": true -+ }, -+ ""tsx"": { -+ ""optional"": true -+ }, -+ ""yaml"": { -+ ""optional"": true - } - } - }, -diff --git a/pkg/ui/frontend/package.json b/pkg/ui/frontend/package.json -index 24da7e01cc723..4159d85fb249e 100644 ---- a/pkg/ui/frontend/package.json -+++ b/pkg/ui/frontend/package.json -@@ -69,6 +69,6 @@ - ""postcss"": ""^8.5.1"", - ""tailwindcss"": ""^3.4.1"", - ""typescript"": ""^5.0.2"", -- ""vite"": ""^5.1.0"" -+ ""vite"": ""^6.0.0"" - } - }",chore,"update dependency vite to v6 (main) (#16476) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -8393c3382afd0ec723e4810c2a50da1a2614cbc8,2019-10-15 18:51:52,Cyril Tovena,rollback fluent-bit push path until we release 0.4,False,"diff --git a/production/helm/fluent-bit/Chart.yaml b/production/helm/fluent-bit/Chart.yaml -index ae9b2f8d35510..909edfd06dfa1 100644 ---- a/production/helm/fluent-bit/Chart.yaml -+++ b/production/helm/fluent-bit/Chart.yaml -@@ -1,5 +1,5 @@ - name: fluent-bit --version: 0.0.1 -+version: 0.0.2 - appVersion: v0.0.1 - kubeVersion: ""^1.10.0-0"" - description: ""Uses fluent-bit Loki go plugin for gathering logs and sending them to Loki"" -diff --git a/production/helm/fluent-bit/templates/configmap.yaml b/production/helm/fluent-bit/templates/configmap.yaml -index 57c44dcd12540..551e0d9362798 100644 ---- a/production/helm/fluent-bit/templates/configmap.yaml -+++ b/production/helm/fluent-bit/templates/configmap.yaml -@@ -31,9 +31,9 @@ data: - Name loki - Match * - {{- if and .Values.loki.user .Values.loki.password }} -- Url {{ .Values.loki.serviceScheme }}://{{ .Values.loki.user }}:{{ .Values.loki.password }}@{{ include ""loki.serviceName"" . }}:{{ .Values.loki.servicePort }}/loki/api/v1/push -+ Url {{ .Values.loki.serviceScheme }}://{{ .Values.loki.user }}:{{ .Values.loki.password }}@{{ include ""loki.serviceName"" . }}:{{ .Values.loki.servicePort }}{{ .Values.loki.servicePath }} - {{- else }} -- Url {{ .Values.loki.serviceScheme }}://{{ include ""loki.serviceName"" . }}:{{ .Values.loki.servicePort }}/loki/api/v1/push -+ Url {{ .Values.loki.serviceScheme }}://{{ include ""loki.serviceName"" . 
}}:{{ .Values.loki.servicePort }}{{ .Values.loki.servicePath }} - {{- end }} - Labels {{ .Values.config.labels }} - RemoveKeys {{ include ""helm-toolkit.utils.joinListWithComma"" .Values.config.removeKeys }} -diff --git a/production/helm/fluent-bit/values.yaml b/production/helm/fluent-bit/values.yaml -index 1fabc811a3a12..7de07d89296d9 100644 ---- a/production/helm/fluent-bit/values.yaml -+++ b/production/helm/fluent-bit/values.yaml -@@ -3,6 +3,7 @@ loki: - serviceName: """" # Defaults to ""${RELEASE}-loki"" if not set - servicePort: 3100 - serviceScheme: http -+ servicePath: /api/prom/push - # user: user - # password: pass - config: -diff --git a/production/helm/loki-stack/Chart.yaml b/production/helm/loki-stack/Chart.yaml -index e4737e0190ab1..4f006b3cb1fb5 100644 ---- a/production/helm/loki-stack/Chart.yaml -+++ b/production/helm/loki-stack/Chart.yaml -@@ -1,5 +1,5 @@ - name: loki-stack --version: 0.17.1 -+version: 0.17.2 - appVersion: v0.3.0 - kubeVersion: ""^1.10.0-0"" - description: ""Loki: like Prometheus, but for logs.""",unknown,rollback fluent-bit push path until we release 0.4 -0d66319a1fce61d6c7a08a63198bd076d24d7554,2021-11-04 01:12:53,Dylan Guedes,"Revert distributor defaulting to inmemory. (#4638) - -- [PR 4440](https://github.com/grafana/loki/pull/4440/files) modified - the distributor setup to use inmemory by default (instead of consul). -That was necessary because, by changing the default rate limiting to -global, a ring was necessary, and since by default the ring was set to -consul, the project couldn't run with the default configs. That's not -the case anymore: right now we are reusing the same config set for the -ingester (which users already have properly configured otherwise they -wouldn't be able to run the project). List of changes: - - Remove changelog entries - - Remove upgrading guide entries - - Remove added tests (unnecessary checks) - - Remove value overriding",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 8be31abffdae5..65cd3fb36e1c1 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,7 +1,6 @@ - ## Main - * [4400](https://github.com/grafana/loki/pull/4400) **trevorwhitney**: Config: automatically apply memberlist config too all rings when provided - * [4435](https://github.com/grafana/loki/pull/4435) **trevorwhitney**: Change default values for two GRPC settings so querier can connect to frontend/scheduler --* [4440](https://github.com/grafana/loki/pull/4440) **DylanGuedes**: Config: Override distributor's default ring KV store - * [4443](https://github.com/grafana/loki/pull/4443) **DylanGuedes**: Loki: Change how push API checks for contentType - * [4415](https://github.com/grafana/loki/pull/4415) **DylanGuedes**: Change default limits to common values - * [4473](https://github.com/grafana/loki/pull/4473) **trevorwhitney**: Config: add object storage configuration to common config -diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md -index 6f49f066e7e3e..d8b4c8404221f 100644 ---- a/docs/sources/upgrading/_index.md -+++ b/docs/sources/upgrading/_index.md -@@ -52,21 +52,6 @@ ingester: - enabled: true - ``` - --#### Distributor now stores ring in memory by default instead of Consul -- --PR [4440](https://github.com/grafana/loki/pull/4440) **DylanGuedes**: Config: Override distributor's default ring KV store -- --This change sets `inmemory` as the new default storage for the Distributor ring (previously `consul`). 
--The motivation is making the Distributor easier to run with default configs, by not requiring Consul anymore. --In any case, if you prefer to use Consul as the ring storage, you can set it by using the following config: -- --```yaml --distributor: -- ring: -- kvstore: -- store: consul --``` -- - #### Memberlist config now automatically applies to all non-configured rings - PR [4400](https://github.com/grafana/loki/pull/4400) **trevorwhitney**: Config: automatically apply memberlist config too all rings when provided - -diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index 1cfa7cb890c16..fc301d949ec76 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -44,25 +44,8 @@ type Config struct { - } - - // RegisterFlags registers distributor-related flags. --// --// Since they are registered through an external library, we override some of them to set --// different default values. - func (cfg *Config) RegisterFlags(fs *flag.FlagSet) { -- throwaway := flag.NewFlagSet(""throwaway"", flag.PanicOnError) -- -- cfg.DistributorRing.RegisterFlags(throwaway) -- -- // Register to throwaway flags first. Default values are remembered during registration and cannot be changed, -- // but we can take values from throwaway flag set and reregister into supplied flags with new default values. -- throwaway.VisitAll(func(f *flag.Flag) { -- // Ignore errors when setting new values. We have a test to verify that it works. -- switch f.Name { -- case ""distributor.ring.store"": -- _ = f.Value.Set(""inmemory"") -- } -- -- fs.Var(f.Value, f.Name, f.Usage) -- }) -+ cfg.DistributorRing.RegisterFlags(fs) - } - - // Distributor coordinates replicates and distribution of log streams. -diff --git a/pkg/loki/loki_test.go b/pkg/loki/loki_test.go -index 4f69c225e9de6..19d45d4827743 100644 ---- a/pkg/loki/loki_test.go -+++ b/pkg/loki/loki_test.go -@@ -42,12 +42,7 @@ func TestFlagDefaults(t *testing.T) { - gotFlags[splittedLine] = nextLine - } - -- flagToCheck := ""-distributor.ring.store"" -- require.Contains(t, gotFlags, flagToCheck) -- require.Equal(t, c.Distributor.DistributorRing.KVStore.Store, ""inmemory"") -- require.Contains(t, gotFlags[flagToCheck], ""(default \""inmemory\"")"") -- -- flagToCheck = ""-server.grpc.keepalive.min-time-between-pings"" -+ flagToCheck := ""-server.grpc.keepalive.min-time-between-pings"" - require.Contains(t, gotFlags, flagToCheck) - require.Equal(t, c.Server.GRPCServerMinTimeBetweenPings, 10*time.Second) - require.Contains(t, gotFlags[flagToCheck], ""(default 10s)"")",unknown,"Revert distributor defaulting to inmemory. (#4638) - -- [PR 4440](https://github.com/grafana/loki/pull/4440/files) modified - the distributor setup to use inmemory by default (instead of consul). -That was necessary because, by changing the default rate limiting to -global, a ring was necessary, and since by default the ring was set to -consul, the project couldn't run with the default configs. That's not -the case anymore: right now we are reusing the same config set for the -ingester (which users already have properly configured otherwise they -wouldn't be able to run the project). List of changes: - - Remove changelog entries - - Remove upgrading guide entries - - Remove added tests (unnecessary checks) - - Remove value overriding" -5f73598389218d47913995ab704b85abbc6ae2bc,2018-12-05 18:36:01,Tom Wilkie,"Rename Tempo to Loki. (#36) - -* Rename Tempo to Loki. - -Signed-off-by: Tom Wilkie - -* Use new build image, don't delete generated files on clean. 
- -Signed-off-by: Tom Wilkie ",False,"diff --git a/.circleci/config.yml b/.circleci/config.yml -index c7e0e501de8a8..eb6fd27244119 100644 ---- a/.circleci/config.yml -+++ b/.circleci/config.yml -@@ -22,8 +22,8 @@ workflows: - # https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/ - defaults: &defaults - docker: -- - image: grafana/tempo-build-image:checkin-generated-files-86c363c -- working_directory: /go/src/github.com/grafana/tempo -+ - image: grafana/loki-build-image:rename-49e21d5-WIP -+ working_directory: /go/src/github.com/grafana/loki - - jobs: - test: -@@ -34,7 +34,7 @@ jobs: - - run: - name: Run Unit Tests - command: | -- touch tempo-build-image/.uptodate && -+ touch loki-build-image/.uptodate && - make BUILD_IN_CONTAINER=false test - - lint: -@@ -45,13 +45,13 @@ jobs: - - run: - name: Lint - command: | -- touch tempo-build-image/.uptodate && -+ touch loki-build-image/.uptodate && - make BUILD_IN_CONTAINER=false lint - - - run: - name: Check Generated Fies - command: | -- touch tempo-build-image/.uptodate && -+ touch loki-build-image/.uptodate && - make BUILD_IN_CONTAINER=false check-generated-files - - build: -@@ -63,17 +63,17 @@ jobs: - - run: - name: Build Images - command: | -- touch tempo-build-image/.uptodate && -+ touch loki-build-image/.uptodate && - make BUILD_IN_CONTAINER=false - - - run: - name: Save Images - command: | -- touch tempo-build-image/.uptodate && -+ touch loki-build-image/.uptodate && - make BUILD_IN_CONTAINER=false save-images - - - save_cache: -- key: v1-tempo-{{ .Branch }}-{{ .Revision }} -+ key: v1-loki-{{ .Branch }}-{{ .Revision }} - paths: - - images/ - -@@ -84,12 +84,12 @@ jobs: - - setup_remote_docker - - - restore_cache: -- key: v1-tempo-{{ .Branch }}-{{ .Revision }} -+ key: v1-loki-{{ .Branch }}-{{ .Revision }} - - - run: - name: Load Images - command: | -- touch tempo-build-image/.uptodate && -+ touch loki-build-image/.uptodate && - make BUILD_IN_CONTAINER=false load-images - - - run: -diff --git a/.gitignore b/.gitignore -index b5c50e799689d..b485f6fdfdb4c 100644 ---- a/.gitignore -+++ b/.gitignore -@@ -3,7 +3,7 @@ - .cache - *.output - mixin/vendor/ --cmd/tempo/tempo -+cmd/loki/loki - cmd/promtail/promtail --/tempo -+/loki - /promtail -diff --git a/Makefile b/Makefile -index 3d45c9a815633..932cf43541de1 100644 ---- a/Makefile -+++ b/Makefile -@@ -8,7 +8,7 @@ IMAGE_TAG := $(shell ./tools/image-tag) - UPTODATE := .uptodate - - # Building Docker images is now automated. The convention is every directory --# with a Dockerfile in it builds an image calls quay.io/grafana/tempo-. -+# with a Dockerfile in it builds an image calls quay.io/grafana/loki-. - # Dependencies (i.e. things that go in the image) still need to be explicitly - # declared. - %/$(UPTODATE): %/Dockerfile -@@ -17,7 +17,7 @@ UPTODATE := .uptodate - touch $@ - - # We don't want find to scan inside a bunch of directories, to accelerate the --# 'make: Entering directory '/go/src/github.com/grafana/tempo' phase. -+# 'make: Entering directory '/go/src/github.com/grafana/loki' phase. 
- DONT_FIND := -name tools -prune -o -name vendor -prune -o -name .git -prune -o -name .cache -prune -o -name .pkg -prune -o - - # Get a list of directories containing Dockerfiles -@@ -62,7 +62,7 @@ protos: $(PROTO_GOS) - yacc: $(YACC_GOS) - - # And now what goes into each image --tempo-build-image/$(UPTODATE): tempo-build-image/* -+loki-build-image/$(UPTODATE): loki-build-image/* - - # All the boiler plate for building golang follows: - SUDO := $(shell docker info >/dev/null 2>&1 || echo ""sudo -E"") -@@ -87,22 +87,22 @@ NETGO_CHECK = @strings $@ | grep cgo_stub\\\.go >/dev/null || { \ - - ifeq ($(BUILD_IN_CONTAINER),true) - --$(EXES) $(PROTO_GOS) $(YACC_GOS) lint test shell check-generated-files: tempo-build-image/$(UPTODATE) -+$(EXES) $(PROTO_GOS) $(YACC_GOS) lint test shell check-generated-files: loki-build-image/$(UPTODATE) - @mkdir -p $(shell pwd)/.pkg - @mkdir -p $(shell pwd)/.cache - $(SUDO) docker run $(RM) $(TTY) -i \ - -v $(shell pwd)/.cache:/go/cache \ - -v $(shell pwd)/.pkg:/go/pkg \ -- -v $(shell pwd):/go/src/github.com/grafana/tempo \ -- $(IMAGE_PREFIX)tempo-build-image $@; -+ -v $(shell pwd):/go/src/github.com/grafana/loki \ -+ $(IMAGE_PREFIX)loki-build-image $@; - - else - --$(EXES): tempo-build-image/$(UPTODATE) -+$(EXES): loki-build-image/$(UPTODATE) - go build $(GO_FLAGS) -o $@ ./$(@D) - $(NETGO_CHECK) - --%.pb.go: tempo-build-image/$(UPTODATE) -+%.pb.go: loki-build-image/$(UPTODATE) - case ""$@"" in \ - vendor*) \ - protoc -I ./vendor:./$(@D) --gogoslick_out=plugins=grpc:./vendor ./$(patsubst %.pb.go,%.proto,$@); \ -@@ -115,16 +115,16 @@ $(EXES): tempo-build-image/$(UPTODATE) - %.go: %.y - goyacc -p $(basename $(notdir $<)) -o $@ $< - --lint: tempo-build-image/$(UPTODATE) -+lint: loki-build-image/$(UPTODATE) - gometalinter ./... - --check-generated-files: tempo-build-image/$(UPTODATE) yacc protos -+check-generated-files: loki-build-image/$(UPTODATE) yacc protos - @git diff-files || (echo ""changed files; failing check"" && exit 1) - --test: tempo-build-image/$(UPTODATE) -+test: loki-build-image/$(UPTODATE) - go test ./... - --shell: tempo-build-image/$(UPTODATE) -+shell: loki-build-image/$(UPTODATE) - bash - - endif -@@ -157,5 +157,5 @@ push-images: - - clean: - $(SUDO) docker rmi $(IMAGE_NAMES) >/dev/null 2>&1 || true -- rm -rf $(UPTODATE_FILES) $(EXES) $(PROTO_GOS) $(YACC_GOS) .cache -+ rm -rf $(UPTODATE_FILES) $(EXES) .cache - go clean ./... -diff --git a/README.md b/README.md -index c04e1323b0d39..09f74abcd874b 100644 ---- a/README.md -+++ b/README.md -@@ -1,24 +1,24 @@ --# Tempo: Like Prometheus, but for logs. -+# Loki: Like Prometheus, but for logs. - --[![CircleCI](https://circleci.com/gh/grafana/tempo/tree/master.svg?style=svg&circle-token=618193e5787b2951c1ea3352ad5f254f4f52313d)](https://circleci.com/gh/grafana/tempo/tree/master) [Design doc](https://docs.google.com/document/d/11tjK_lvp1-SVsFZjgOTr1vV3-q6vBAsZYIQ5ZeYBkyM/edit) -+[![CircleCI](https://circleci.com/gh/grafana/loki/tree/master.svg?style=svg&circle-token=618193e5787b2951c1ea3352ad5f254f4f52313d)](https://circleci.com/gh/grafana/loki/tree/master) [Design doc](https://docs.google.com/document/d/11tjK_lvp1-SVsFZjgOTr1vV3-q6vBAsZYIQ5ZeYBkyM/edit) - --Tempo is a horizontally-scalable, highly-available, multi-tenant, log aggregation -+Loki is a horizontally-scalable, highly-available, multi-tenant, log aggregation - system inspired by Prometheus. It is designed to be very cost effective, as it does - not index the contents of the logs, but rather a set of labels for each log stream. 
- - ## Run it locally - --Tempo can be run in a single host, no-dependencies mode using the following commands. -+Loki can be run in a single host, no-dependencies mode using the following commands. - --Tempo consists of 3 components; `tempo` is the main server, responsible for storing -+Loki consists of 3 components; `loki` is the main server, responsible for storing - logs and processing queries. `promtail` is the agent, responsible for gather logs --and sending them to tempo and `grafana` as the UI. -+and sending them to loki and `grafana` as the UI. - --To run tempo, use the following commands: -+To run loki, use the following commands: - - ``` --$ go build ./cmd/tempo --$ ./tempo -config.file=./docs/tempo-local-config.yaml -+$ go build ./cmd/loki -+$ ./loki -config.file=./docs/loki-local-config.yaml - ... - ``` - -@@ -30,7 +30,7 @@ $ ./promtail -config.file=./docs/promtail-local-config.yaml - ... - ``` - --Grafana is Tempo's UI, so you'll also want to run one of those: -+Grafana is Loki's UI, so you'll also want to run one of those: - - ``` - $ docker run -ti -p 3000:3000 -e ""GF_EXPLORE_ENABLED=true"" grafana/grafana-dev:master-377eaa891c1eefdec9c83a2ee4dcf5c81665ab1f -@@ -40,12 +40,12 @@ In the Grafana UI (http://localhost:3000), loging with ""admin""/""admin"", add a ne - - ## Usage Instructions - --Tempo is running in the ops-tools1 cluster. You can query logs from that cluster -+Loki is running in the ops-tools1 cluster. You can query logs from that cluster - using the following commands: - - ``` --$ go get github.com/grafana/tempo/cmd/logcli --$ . $GOPATH/src/github.com/grafana/tempo/env # env vars inc. URL, username etc -+$ go get github.com/grafana/loki/cmd/logcli -+$ . $GOPATH/src/github.com/grafana/loki/env # env vars inc. URL, username etc - $ logcli labels job - https://logs-dev-ops-tools1.grafana.net/api/prom/label/job/values - cortex-ops/consul -@@ -58,14 +58,14 @@ Common labels: {job=""cortex-ops/consul"", namespace=""cortex-ops""} - 2018-06-25T12:52:09Z {instance=""consul-8576459955-pl75w""} 2018/06/25 12:52:09 [INFO] raft: Compacting logs from 456973 to 465169 - ``` - --The `logcli` command is temporary until we have Grafana integration. The URLs of -+The `logcli` command is lokirary until we have Grafana integration. The URLs of - the requests are printed to help with integration work. - - ``` - $ logcli help - usage: logcli [] [ ...] - --A command-line for tempo. -+A command-line for loki. - - Flags: - --help Show context-sensitive help (also try --help-long and --help-man). 
-diff --git a/cmd/logcli/client.go b/cmd/logcli/client.go -index f20084aa0e58d..e91080c821b30 100644 ---- a/cmd/logcli/client.go -+++ b/cmd/logcli/client.go -@@ -8,7 +8,7 @@ import ( - ""net/url"" - ""time"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - const ( -diff --git a/cmd/logcli/labels.go b/cmd/logcli/labels.go -index 45204528b3d43..4f52fa4228599 100644 ---- a/cmd/logcli/labels.go -+++ b/cmd/logcli/labels.go -@@ -4,7 +4,7 @@ import ( - ""fmt"" - ""log"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - func doLabels() { -diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go -index ccf4254d26682..be6023ebdb7ef 100644 ---- a/cmd/logcli/main.go -+++ b/cmd/logcli/main.go -@@ -7,7 +7,7 @@ import ( - ) - - var ( -- app = kingpin.New(""logcli"", ""A command-line for tempo."") -+ app = kingpin.New(""logcli"", ""A command-line for loki."") - addr = app.Flag(""addr"", ""Server address."").Default(""https://log-us.grafana.net"").Envar(""GRAFANA_ADDR"").String() - username = app.Flag(""username"", ""Username for HTTP basic auth."").Default("""").Envar(""GRAFANA_USERNAME"").String() - password = app.Flag(""password"", ""Password for HTTP basic auth."").Default("""").Envar(""GRAFANA_PASSWORD"").String() -diff --git a/cmd/logcli/query.go b/cmd/logcli/query.go -index d28efff55594c..26cb92248b6d6 100644 ---- a/cmd/logcli/query.go -+++ b/cmd/logcli/query.go -@@ -9,9 +9,9 @@ import ( - ""github.com/fatih/color"" - ""github.com/prometheus/prometheus/pkg/labels"" - -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -- ""github.com/grafana/tempo/pkg/parser"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/parser"" - ) - - func doQuery() { -diff --git a/cmd/logcli/tail.go b/cmd/logcli/tail.go -index 9dac4a45060ff..356fee55e158b 100644 ---- a/cmd/logcli/tail.go -+++ b/cmd/logcli/tail.go -@@ -3,8 +3,8 @@ package main - import ( - ""time"" - -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - const tailIteratorIncrement = 10 * time.Second -diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile -new file mode 100644 -index 0000000000000..af23bb6e3bb0f ---- /dev/null -+++ b/cmd/loki/Dockerfile -@@ -0,0 +1,4 @@ -+FROM alpine:3.4 -+COPY loki /bin/loki -+EXPOSE 80 -+ENTRYPOINT [ ""/bin/loki"" ] -diff --git a/cmd/tempo/main.go b/cmd/loki/main.go -similarity index 67% -rename from cmd/tempo/main.go -rename to cmd/loki/main.go -index a445063a2c076..4788352b6498c 100644 ---- a/cmd/tempo/main.go -+++ b/cmd/loki/main.go -@@ -5,8 +5,8 @@ import ( - ""os"" - - ""github.com/go-kit/kit/log/level"" -- ""github.com/grafana/tempo/pkg/helpers"" -- ""github.com/grafana/tempo/pkg/tempo"" -+ ""github.com/grafana/loki/pkg/helpers"" -+ ""github.com/grafana/loki/pkg/loki"" - - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/cortexproject/cortex/pkg/util/flagext"" -@@ -14,7 +14,7 @@ import ( - - func main() { - var ( -- cfg tempo.Config -+ cfg loki.Config - configFile = """" - ) - flag.StringVar(&configFile, ""config.file"", """", ""Configuration file to load."") -@@ -30,18 +30,18 @@ func main() { - } - } - -- t, err := tempo.New(cfg) -+ t, err := loki.New(cfg) - if err != nil { -- level.Error(util.Logger).Log(""msg"", ""error initialising tempo"", ""err"", err) -+ 
level.Error(util.Logger).Log(""msg"", ""error initialising loki"", ""err"", err) - os.Exit(1) - } - - if err := t.Run(); err != nil { -- level.Error(util.Logger).Log(""msg"", ""error running tempo"", ""err"", err) -+ level.Error(util.Logger).Log(""msg"", ""error running loki"", ""err"", err) - } - - if err := t.Stop(); err != nil { -- level.Error(util.Logger).Log(""msg"", ""error stopping tempo"", ""err"", err) -+ level.Error(util.Logger).Log(""msg"", ""error stopping loki"", ""err"", err) - os.Exit(1) - } - } -diff --git a/cmd/promtail/main.go b/cmd/promtail/main.go -index 0aae5ce37cd06..ec6f8d1cf16e0 100644 ---- a/cmd/promtail/main.go -+++ b/cmd/promtail/main.go -@@ -9,8 +9,8 @@ import ( - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/cortexproject/cortex/pkg/util/flagext"" - -- ""github.com/grafana/tempo/pkg/helpers"" -- ""github.com/grafana/tempo/pkg/promtail"" -+ ""github.com/grafana/loki/pkg/helpers"" -+ ""github.com/grafana/loki/pkg/promtail"" - ) - - func main() { -@@ -33,12 +33,12 @@ func main() { - - p, err := promtail.New(config) - if err != nil { -- level.Error(util.Logger).Log(""msg"", ""error creating tempo"", ""error"", err) -+ level.Error(util.Logger).Log(""msg"", ""error creating loki"", ""error"", err) - os.Exit(1) - } - - if err := p.Run(); err != nil { -- level.Error(util.Logger).Log(""msg"", ""error starting tempo"", ""error"", err) -+ level.Error(util.Logger).Log(""msg"", ""error starting loki"", ""error"", err) - os.Exit(1) - } - -diff --git a/cmd/tempo/Dockerfile b/cmd/tempo/Dockerfile -deleted file mode 100644 -index 07a1affd210f3..0000000000000 ---- a/cmd/tempo/Dockerfile -+++ /dev/null -@@ -1,4 +0,0 @@ --FROM alpine:3.4 --COPY tempo /bin/tempo --EXPOSE 80 --ENTRYPOINT [ ""/bin/tempo"" ] -diff --git a/docs/tempo-local-config.yaml b/docs/loki-local-config.yaml -similarity index 100% -rename from docs/tempo-local-config.yaml -rename to docs/loki-local-config.yaml -diff --git a/tempo-build-image/Dockerfile b/loki-build-image/Dockerfile -similarity index 100% -rename from tempo-build-image/Dockerfile -rename to loki-build-image/Dockerfile -diff --git a/tempo-build-image/build.sh b/loki-build-image/build.sh -similarity index 92% -rename from tempo-build-image/build.sh -rename to loki-build-image/build.sh -index f483f4ef2d573..76901c4fa41e6 100755 ---- a/tempo-build-image/build.sh -+++ b/loki-build-image/build.sh -@@ -2,7 +2,7 @@ - - set -eu - --SRC_PATH=$GOPATH/src/github.com/grafana/tempo -+SRC_PATH=$GOPATH/src/github.com/grafana/loki - - # If we run make directly, any files created on the bind mount - # will have awkward ownership. 
So we switch to a user with the -diff --git a/mixin/alerts.libsonnet b/mixin/alerts.libsonnet -index 555a58ff5ad10..53bd73e2e0fb7 100644 ---- a/mixin/alerts.libsonnet -+++ b/mixin/alerts.libsonnet -@@ -2,14 +2,14 @@ - prometheusAlerts+:: { - groups+: [ - { -- name: 'tempo_alerts', -+ name: 'loki_alerts', - rules: [ - { -- alert: 'TempoRequestErrors', -+ alert: 'LokiRequestErrors', - expr: ||| -- 100 * sum(rate(tempo_request_duration_seconds_count{status_code=~""5..""}[1m])) by (namespace, job, route) -+ 100 * sum(rate(loki_request_duration_seconds_count{status_code=~""5..""}[1m])) by (namespace, job, route) - / -- sum(rate(tempo_request_duration_seconds_count[1m])) by (namespace, job, route) -+ sum(rate(loki_request_duration_seconds_count[1m])) by (namespace, job, route) - > 10 - |||, - 'for': '15m', -@@ -23,9 +23,9 @@ - }, - }, - { -- alert: 'TempoRequestLatency', -+ alert: 'LokiRequestLatency', - expr: ||| -- namespace_job_route:tempo_request_duration_seconds:99quantile > 1 -+ namespace_job_route:loki_request_duration_seconds:99quantile > 1 - |||, - 'for': '15m', - labels: { -@@ -40,7 +40,7 @@ - ], - }, - { -- name: 'tempo_frontend_alerts', -+ name: 'loki_frontend_alerts', - rules: [ - { - alert: 'FrontendRequestErrors', -diff --git a/mixin/dashboards.libsonnet b/mixin/dashboards.libsonnet -index 2c49f2f6762b9..4ea4bf962b026 100644 ---- a/mixin/dashboards.libsonnet -+++ b/mixin/dashboards.libsonnet -@@ -2,10 +2,10 @@ local g = import 'grafana-builder/grafana.libsonnet'; - - { - dashboards+: { -- 'tempo-writes.json': -- g.dashboard('Tempo / Writes') -- .addTemplate('cluster', 'kube_pod_container_info{image=~"".*tempo.*""}', 'cluster') -- .addTemplate('namespace', 'kube_pod_container_info{image=~"".*tempo.*""}', 'namespace') -+ 'loki-writes.json': -+ g.dashboard('Loki / Writes') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*loki.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*loki.*""}', 'namespace') - .addRow( - g.row('Frontend (cortex_gw)') - .addPanel( -@@ -21,29 +21,29 @@ local g = import 'grafana-builder/grafana.libsonnet'; - g.row('Distributor') - .addPanel( - g.panel('QPS') + -- g.qpsPanel('tempo_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/distributor"", route=""api_prom_push""}') -+ g.qpsPanel('loki_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/distributor"", route=""api_prom_push""}') - ) - .addPanel( - g.panel('Latency') + -- g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/distributor'), g.selector.eq('route', 'api_prom_push')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/distributor'), g.selector.eq('route', 'api_prom_push')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) - ) - ) - .addRow( - g.row('Ingester') - .addPanel( - g.panel('QPS') + -- g.qpsPanel('tempo_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route=""/logproto.Pusher/Push""}') -+ g.qpsPanel('loki_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route=""/logproto.Pusher/Push""}') - ) - .addPanel( - g.panel('Latency') + -- g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.eq('route', '/logproto.Pusher/Push')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ 
g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.eq('route', '/logproto.Pusher/Push')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) - ) - ), - -- 'tempo-reads.json': -- g.dashboard('tempo / Reads') -- .addTemplate('cluster', 'kube_pod_container_info{image=~"".*tempo.*""}', 'cluster') -- .addTemplate('namespace', 'kube_pod_container_info{image=~"".*tempo.*""}', 'namespace') -+ 'loki-reads.json': -+ g.dashboard('loki / Reads') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*loki.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*loki.*""}', 'namespace') - .addRow( - g.row('Frontend (cortex_gw)') - .addPanel( -@@ -59,83 +59,83 @@ local g = import 'grafana-builder/grafana.libsonnet'; - g.row('Querier') - .addPanel( - g.panel('QPS') + -- g.qpsPanel('tempo_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/querier""}') -+ g.qpsPanel('loki_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/querier""}') - ) - .addPanel( - g.panel('Latency') + -- g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/querier')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/querier')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) - ) - ) - .addRow( - g.row('Ingester') - .addPanel( - g.panel('QPS') + -- g.qpsPanel('tempo_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route!~""/logproto.Pusher/Push|metrics|ready|traces""}') -+ g.qpsPanel('loki_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route!~""/logproto.Pusher/Push|metrics|ready|traces""}') - ) - .addPanel( - g.panel('Latency') + -- g.latencyRecordingRulePanel('tempo_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.nre('route', '/logproto.Pusher/Push|metrics|ready')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) -+ g.latencyRecordingRulePanel('loki_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.nre('route', '/logproto.Pusher/Push|metrics|ready')], extra_selectors=[g.selector.eq('cluster', '$cluster')]) - ) - ), - - -- 'tempo-chunks.json': -- g.dashboard('Tempo / Chunks') -- .addTemplate('cluster', 'kube_pod_container_info{image=~"".*tempo.*""}', 'cluster') -- .addTemplate('namespace', 'kube_pod_container_info{image=~"".*tempo.*""}', 'namespace') -+ 'loki-chunks.json': -+ g.dashboard('Loki / Chunks') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*loki.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*loki.*""}', 'namespace') - .addRow( - g.row('Active Series / Chunks') - .addPanel( - g.panel('Series') + -- g.queryPanel('sum(tempo_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""})', 'series'), -+ g.queryPanel('sum(loki_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""})', 'series'), - ) - .addPanel( - g.panel('Chunks per series') + -- g.queryPanel('sum(tempo_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""}) / sum(tempo_ingester_memory_series{job=""$namespace/ingester""})', 'chunks'), -+ g.queryPanel('sum(loki_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""}) / sum(loki_ingester_memory_series{job=""$namespace/ingester""})', 'chunks'), - ) - ) - .addRow( 
- g.row('Flush Stats') - .addPanel( - g.panel('Utilization') + -- g.latencyPanel('tempo_ingester_chunk_utilization', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') + -+ g.latencyPanel('loki_ingester_chunk_utilization', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') + - { yaxes: g.yaxes('percentunit') }, - ) - .addPanel( - g.panel('Age') + -- g.latencyPanel('tempo_ingester_chunk_age_seconds', '{cluster=""$cluster"", job=""$namespace/ingester""}'), -+ g.latencyPanel('loki_ingester_chunk_age_seconds', '{cluster=""$cluster"", job=""$namespace/ingester""}'), - ), - ) - .addRow( - g.row('Flush Stats') - .addPanel( - g.panel('Size') + -- g.latencyPanel('tempo_ingester_chunk_length', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') + -+ g.latencyPanel('loki_ingester_chunk_length', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') + - { yaxes: g.yaxes('short') }, - ) - .addPanel( - g.panel('Entries') + -- g.queryPanel('sum(rate(tempo_chunk_store_index_entries_per_chunk_sum{cluster=""$cluster"", job=""$namespace/ingester""}[5m])) / sum(rate(tempo_chunk_store_index_entries_per_chunk_count{cluster=""$cluster"", job=""$namespace/ingester""}[5m]))', 'entries'), -+ g.queryPanel('sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster=""$cluster"", job=""$namespace/ingester""}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster=""$cluster"", job=""$namespace/ingester""}[5m]))', 'entries'), - ), - ) - .addRow( - g.row('Flush Stats') - .addPanel( - g.panel('Queue Length') + -- g.queryPanel('tempo_ingester_flush_queue_length{cluster=""$cluster"", job=""$namespace/ingester""}', '{{instance}}'), -+ g.queryPanel('loki_ingester_flush_queue_length{cluster=""$cluster"", job=""$namespace/ingester""}', '{{instance}}'), - ) - .addPanel( - g.panel('Flush Rate') + -- g.qpsPanel('tempo_ingester_chunk_age_seconds_count{cluster=""$cluster"", job=""$namespace/ingester""}'), -+ g.qpsPanel('loki_ingester_chunk_age_seconds_count{cluster=""$cluster"", job=""$namespace/ingester""}'), - ), - ), - -- 'tempo-frontend.json': -- g.dashboard('Tempo / Frontend') -- .addTemplate('cluster', 'kube_pod_container_info{image=~"".*tempo.*""}', 'cluster') -- .addTemplate('namespace', 'kube_pod_container_info{image=~"".*tempo.*""}', 'namespace') -+ 'loki-frontend.json': -+ g.dashboard('Loki / Frontend') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*loki.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*loki.*""}', 'namespace') - .addRow( -- g.row('tempo Reqs (cortex_gw)') -+ g.row('loki Reqs (cortex_gw)') - .addPanel( - g.panel('QPS') + - g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/cortex-gw""}') -@@ -146,9 +146,9 @@ local g = import 'grafana-builder/grafana.libsonnet'; - ) - ), - 'promtail.json': -- g.dashboard('Tempo / Promtail') -- .addTemplate('cluster', 'kube_pod_container_info{image=~"".*tempo.*""}', 'cluster') -- .addTemplate('namespace', 'kube_pod_container_info{image=~"".*tempo.*""}', 'namespace') -+ g.dashboard('Loki / Promtail') -+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*loki.*""}', 'cluster') -+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*loki.*""}', 'namespace') - .addRow( - g.row('promtail Reqs') - .addPanel( -diff --git a/mixin/recording_rules.libsonnet b/mixin/recording_rules.libsonnet -index 95d7e9f54ff83..2f639046eca16 100644 ---- a/mixin/recording_rules.libsonnet -+++ 
b/mixin/recording_rules.libsonnet -@@ -22,13 +22,13 @@ local histogramRules(metric, labels) = - { - prometheus_rules+:: { - groups+: [{ -- name: 'tempo_rules', -+ name: 'loki_rules', - rules: -- histogramRules('tempo_request_duration_seconds', ['job']) + -- histogramRules('tempo_request_duration_seconds', ['job', 'route']) + -- histogramRules('tempo_request_duration_seconds', ['namespace', 'job', 'route']), -+ histogramRules('loki_request_duration_seconds', ['job']) + -+ histogramRules('loki_request_duration_seconds', ['job', 'route']) + -+ histogramRules('loki_request_duration_seconds', ['namespace', 'job', 'route']), - }, { -- name: 'tempo_frontend_rules', -+ name: 'loki_frontend_rules', - rules: - histogramRules('cortex_gw_request_duration_seconds', ['job']) + - histogramRules('cortex_gw_request_duration_seconds', ['job', 'route']) + -diff --git a/pkg/chunkenc/dumb_chunk.go b/pkg/chunkenc/dumb_chunk.go -index 08f0b7eb98983..65f519a534155 100644 ---- a/pkg/chunkenc/dumb_chunk.go -+++ b/pkg/chunkenc/dumb_chunk.go -@@ -5,8 +5,8 @@ import ( - ""sort"" - ""time"" - -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - const ( -diff --git a/pkg/chunkenc/gzip.go b/pkg/chunkenc/gzip.go -index 8192516c2ada8..26d86925e8138 100644 ---- a/pkg/chunkenc/gzip.go -+++ b/pkg/chunkenc/gzip.go -@@ -11,9 +11,9 @@ import ( - ""math"" - ""time"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - -- ""github.com/grafana/tempo/pkg/iter"" -+ ""github.com/grafana/loki/pkg/iter"" - - ""github.com/pkg/errors"" - ) -diff --git a/pkg/chunkenc/gzip_test.go b/pkg/chunkenc/gzip_test.go -index 48bc1f2ae4604..c316094bf9d8e 100644 ---- a/pkg/chunkenc/gzip_test.go -+++ b/pkg/chunkenc/gzip_test.go -@@ -8,7 +8,7 @@ import ( - ""testing"" - ""time"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - - ""github.com/stretchr/testify/require"" - ) -diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go -index adfe01aa0fb54..4be23940e57d1 100644 ---- a/pkg/chunkenc/interface.go -+++ b/pkg/chunkenc/interface.go -@@ -5,8 +5,8 @@ import ( - ""io"" - ""time"" - -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - // Errors returned by the chunk interface. 
-diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index c04090ed8b010..f22d498c03235 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -14,24 +14,24 @@ import ( - ""github.com/weaveworks/common/user"" - ""google.golang.org/grpc/health/grpc_health_v1"" - -- ""github.com/grafana/tempo/pkg/ingester/client"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/ingester/client"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - var ( - sendDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Name: ""distributor_send_duration_seconds"", - Help: ""Time spent sending a sample batch to multiple replicated ingesters."", - Buckets: []float64{.001, .0025, .005, .01, .025, .05, .1, .25, .5, 1}, - }, []string{""method"", ""status_code""}) - ingesterAppends = prometheus.NewCounterVec(prometheus.CounterOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Name: ""distributor_ingester_appends_total"", - Help: ""The total number of batch appends sent to ingesters."", - }, []string{""ingester""}) - ingesterAppendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Name: ""distributor_ingester_append_failures_total"", - Help: ""The total number of failed batch appends sent to ingesters."", - }, []string{""ingester""}) -diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go -index 159fc01979bc9..5761e08dc5836 100644 ---- a/pkg/distributor/http.go -+++ b/pkg/distributor/http.go -@@ -5,7 +5,7 @@ import ( - - ""github.com/cortexproject/cortex/pkg/util"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - // PushHandler reads a snappy-compressed proto from the HTTP body. -diff --git a/pkg/ingester/chunk_test.go b/pkg/ingester/chunk_test.go -index e42be4988cc56..83d3588c1c128 100644 ---- a/pkg/ingester/chunk_test.go -+++ b/pkg/ingester/chunk_test.go -@@ -6,9 +6,9 @@ import ( - ""testing"" - ""time"" - -- ""github.com/grafana/tempo/pkg/chunkenc"" -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/chunkenc"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" - ""github.com/stretchr/testify/assert"" - ""github.com/stretchr/testify/require"" - ) -diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go -index b5e32e95d17c9..97446c0098077 100644 ---- a/pkg/ingester/client/client.go -+++ b/pkg/ingester/client/client.go -@@ -6,7 +6,7 @@ import ( - ""time"" - - cortex_client ""github.com/cortexproject/cortex/pkg/ingester/client"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"" - ""github.com/mwitkow/go-grpc-middleware"" - opentracing ""github.com/opentracing/opentracing-go"" -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index 4980e30f3af41..53f4cd29663b4 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -10,7 +10,7 @@ import ( - ""github.com/weaveworks/common/user"" - ""google.golang.org/grpc/health/grpc_health_v1"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - // Config for an ingester. 
-diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -index 26e7cd4fac40d..763775fd7bfab 100644 ---- a/pkg/ingester/instance.go -+++ b/pkg/ingester/instance.go -@@ -8,11 +8,11 @@ import ( - ""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" - -- ""github.com/grafana/tempo/pkg/helpers"" -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -- ""github.com/grafana/tempo/pkg/parser"" -- ""github.com/grafana/tempo/pkg/querier"" -+ ""github.com/grafana/loki/pkg/helpers"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/parser"" -+ ""github.com/grafana/loki/pkg/querier"" - ) - - const queryBatchSize = 128 -@@ -24,12 +24,12 @@ var ( - - var ( - streamsCreatedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Name: ""ingester_streams_created_total"", - Help: ""The total number of streams created in the ingester."", - }, []string{""org""}) - streamsRemovedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Name: ""ingester_streams_removed_total"", - Help: ""The total number of streams removed by the ingester."", - }, []string{""org""}) -diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go -index 6ea436a112ac2..87e3516d5feed 100644 ---- a/pkg/ingester/stream.go -+++ b/pkg/ingester/stream.go -@@ -7,25 +7,25 @@ import ( - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/prometheus/pkg/labels"" - -- ""github.com/grafana/tempo/pkg/chunkenc"" -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/chunkenc"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - var ( - chunksCreatedTotal = prometheus.NewCounter(prometheus.CounterOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Name: ""ingester_chunks_created_total"", - Help: ""The total number of chunks created in the ingester."", - }) - chunksFlushedTotal = prometheus.NewCounter(prometheus.CounterOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Name: ""ingester_chunks_flushed_total"", - Help: ""The total number of chunks flushed by the ingester."", - }) - - samplesPerChunk = prometheus.NewHistogram(prometheus.HistogramOpts{ -- Namespace: ""tempo"", -+ Namespace: ""loki"", - Subsystem: ""ingester"", - Name: ""samples_per_chunk"", - Help: ""The number of samples in a chunk."", -diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go -index 6bb5bc9c774de..549e12cb5620f 100644 ---- a/pkg/ingester/stream_test.go -+++ b/pkg/ingester/stream_test.go -@@ -6,8 +6,8 @@ import ( - ""testing"" - ""time"" - -- ""github.com/grafana/tempo/pkg/chunkenc"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/chunkenc"" -+ ""github.com/grafana/loki/pkg/logproto"" - ""github.com/stretchr/testify/require"" - ) - -diff --git a/pkg/iter/iterator.go b/pkg/iter/iterator.go -index e586229033c75..fac7364acbc3f 100644 ---- a/pkg/iter/iterator.go -+++ b/pkg/iter/iterator.go -@@ -7,8 +7,8 @@ import ( - ""regexp"" - ""time"" - -- ""github.com/grafana/tempo/pkg/helpers"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/helpers"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - // EntryIterator iterates over entries in time-order. 
-diff --git a/pkg/iter/iterator_test.go b/pkg/iter/iterator_test.go -index 529ae169212ef..d2250b84f563a 100644 ---- a/pkg/iter/iterator_test.go -+++ b/pkg/iter/iterator_test.go -@@ -5,7 +5,7 @@ import ( - ""testing"" - ""time"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ""github.com/stretchr/testify/assert"" - ) - -diff --git a/pkg/tempo/fake_auth.go b/pkg/loki/fake_auth.go -similarity index 98% -rename from pkg/tempo/fake_auth.go -rename to pkg/loki/fake_auth.go -index 849f513b59d30..25ae44e2d2c0b 100644 ---- a/pkg/tempo/fake_auth.go -+++ b/pkg/loki/fake_auth.go -@@ -1,4 +1,4 @@ --package tempo -+package loki - - import ( - ""context"" -diff --git a/pkg/tempo/tempo.go b/pkg/loki/loki.go -similarity index 78% -rename from pkg/tempo/tempo.go -rename to pkg/loki/loki.go -index 185ae43ba836e..bb75e4b3872fb 100644 ---- a/pkg/tempo/tempo.go -+++ b/pkg/loki/loki.go -@@ -1,4 +1,4 @@ --package tempo -+package loki - - import ( - ""flag"" -@@ -13,13 +13,13 @@ import ( - ""github.com/weaveworks/common/middleware"" - ""github.com/weaveworks/common/server"" - -- ""github.com/grafana/tempo/pkg/distributor"" -- ""github.com/grafana/tempo/pkg/ingester"" -- ""github.com/grafana/tempo/pkg/ingester/client"" -- ""github.com/grafana/tempo/pkg/querier"" -+ ""github.com/grafana/loki/pkg/distributor"" -+ ""github.com/grafana/loki/pkg/ingester"" -+ ""github.com/grafana/loki/pkg/ingester/client"" -+ ""github.com/grafana/loki/pkg/querier"" - ) - --// Config is the root config for Tempo. -+// Config is the root config for Loki. - type Config struct { - Target moduleName `yaml:""target,omitempty""` - AuthEnabled bool `yaml:""auth_enabled,omitempty""` -@@ -33,7 +33,7 @@ type Config struct { - - // RegisterFlags registers flag. - func (c *Config) RegisterFlags(f *flag.FlagSet) { -- c.Server.MetricsNamespace = ""tempo"" -+ c.Server.MetricsNamespace = ""loki"" - c.Target = All - f.Var(&c.Target, ""target"", ""target module (default All)"") - f.BoolVar(&c.AuthEnabled, ""auth.enabled"", true, ""Set to false to disable auth."") -@@ -45,8 +45,8 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { - c.Ingester.RegisterFlags(f) - } - --// Tempo is the root datastructure for Tempo. --type Tempo struct { -+// Loki is the root datastructure for Loki. -+type Loki struct { - cfg Config - - server *server.Server -@@ -60,23 +60,23 @@ type Tempo struct { - inited map[moduleName]struct{} - } - --// New makes a new Tempo. --func New(cfg Config) (*Tempo, error) { -- tempo := &Tempo{ -+// New makes a new Loki. -+func New(cfg Config) (*Loki, error) { -+ loki := &Loki{ - cfg: cfg, - inited: map[moduleName]struct{}{}, - } - -- tempo.setupAuthMiddleware() -+ loki.setupAuthMiddleware() - -- if err := tempo.init(cfg.Target); err != nil { -+ if err := loki.init(cfg.Target); err != nil { - return nil, err - } - -- return tempo, nil -+ return loki, nil - } - --func (t *Tempo) setupAuthMiddleware() { -+func (t *Loki) setupAuthMiddleware() { - if t.cfg.AuthEnabled { - t.cfg.Server.GRPCMiddleware = []grpc.UnaryServerInterceptor{ - middleware.ServerUserHeaderInterceptor, -@@ -96,7 +96,7 @@ func (t *Tempo) setupAuthMiddleware() { - } - } - --func (t *Tempo) init(m moduleName) error { -+func (t *Loki) init(m moduleName) error { - if _, ok := t.inited[m]; ok { - return nil - } -@@ -118,19 +118,19 @@ func (t *Tempo) init(m moduleName) error { - return nil - } - --// Run starts Tempo running, and blocks until a signal is received. 
--func (t *Tempo) Run() error { -+// Run starts Loki running, and blocks until a signal is received. -+func (t *Loki) Run() error { - return t.server.Run() - } - --// Stop gracefully stops a Tempo. --func (t *Tempo) Stop() error { -+// Stop gracefully stops a Loki. -+func (t *Loki) Stop() error { - t.server.Shutdown() - t.stop(t.cfg.Target) - return nil - } - --func (t *Tempo) stop(m moduleName) { -+func (t *Loki) stop(m moduleName) { - if _, ok := t.inited[m]; !ok { - return - } -diff --git a/pkg/tempo/modules.go b/pkg/loki/modules.go -similarity index 82% -rename from pkg/tempo/modules.go -rename to pkg/loki/modules.go -index cf7aa16ee21b5..b7c267e82dc58 100644 ---- a/pkg/tempo/modules.go -+++ b/pkg/loki/modules.go -@@ -1,4 +1,4 @@ --package tempo -+package loki - - import ( - ""fmt"" -@@ -13,15 +13,15 @@ import ( - ""github.com/weaveworks/common/middleware"" - ""github.com/weaveworks/common/server"" - -- ""github.com/grafana/tempo/pkg/distributor"" -- ""github.com/grafana/tempo/pkg/ingester"" -- ""github.com/grafana/tempo/pkg/logproto"" -- ""github.com/grafana/tempo/pkg/querier"" -+ ""github.com/grafana/loki/pkg/distributor"" -+ ""github.com/grafana/loki/pkg/ingester"" -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/querier"" - ) - - type moduleName int - --// The various modules that make up Tempo. -+// The various modules that make up Loki. - const ( - Ring moduleName = iota - Server -@@ -75,12 +75,12 @@ func (m *moduleName) Set(s string) error { - } - } - --func (t *Tempo) initServer() (err error) { -+func (t *Loki) initServer() (err error) { - t.server, err = server.New(t.cfg.Server) - return - } - --func (t *Tempo) initRing() (err error) { -+func (t *Loki) initRing() (err error) { - t.ring, err = ring.New(t.cfg.Ingester.LifecyclerConfig.RingConfig) - if err != nil { - return -@@ -89,7 +89,7 @@ func (t *Tempo) initRing() (err error) { - return - } - --func (t *Tempo) initDistributor() (err error) { -+func (t *Loki) initDistributor() (err error) { - t.distributor, err = distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring) - if err != nil { - return -@@ -108,7 +108,7 @@ func (t *Tempo) initDistributor() (err error) { - return - } - --func (t *Tempo) initQuerier() (err error) { -+func (t *Loki) initQuerier() (err error) { - t.querier, err = querier.New(t.cfg.Querier, t.cfg.IngesterClient, t.ring) - if err != nil { - return -@@ -129,7 +129,7 @@ func (t *Tempo) initQuerier() (err error) { - return - } - --func (t *Tempo) initIngester() (err error) { -+func (t *Loki) initIngester() (err error) { - t.cfg.Ingester.LifecyclerConfig.ListenPort = &t.cfg.Server.GRPCListenPort - t.ingester, err = ingester.New(t.cfg.Ingester) - if err != nil { -@@ -143,41 +143,41 @@ func (t *Tempo) initIngester() (err error) { - return - } - --func (t *Tempo) stopIngester() error { -+func (t *Loki) stopIngester() error { - t.ingester.Shutdown() - return nil - } - - type module struct { - deps []moduleName -- init func(t *Tempo) error -- stop func(t *Tempo) error -+ init func(t *Loki) error -+ stop func(t *Loki) error - } - - var modules = map[moduleName]module{ - Server: { -- init: (*Tempo).initServer, -+ init: (*Loki).initServer, - }, - - Ring: { - deps: []moduleName{Server}, -- init: (*Tempo).initRing, -+ init: (*Loki).initRing, - }, - - Distributor: { - deps: []moduleName{Ring, Server}, -- init: (*Tempo).initDistributor, -+ init: (*Loki).initDistributor, - }, - - Ingester: { - deps: []moduleName{Server}, -- init: (*Tempo).initIngester, -- stop: (*Tempo).stopIngester, -+ 
init: (*Loki).initIngester, -+ stop: (*Loki).stopIngester, - }, - - Querier: { - deps: []moduleName{Ring, Server}, -- init: (*Tempo).initQuerier, -+ init: (*Loki).initQuerier, - }, - - All: { -diff --git a/pkg/parser/labels.go b/pkg/parser/labels.go -index 6863c1963cfc2..b7dd7312fbe4b 100644 ---- a/pkg/parser/labels.go -+++ b/pkg/parser/labels.go -@@ -6,6 +6,7 @@ package parser - import __yyfmt__ ""fmt"" - - //line pkg/parser/labels.y:2 -+ - import ( - ""github.com/prometheus/prometheus/pkg/labels"" - ) -diff --git a/pkg/promtail/client.go b/pkg/promtail/client.go -index af5a5394d9dc6..b703fdf41f64e 100644 ---- a/pkg/promtail/client.go -+++ b/pkg/promtail/client.go -@@ -18,7 +18,7 @@ import ( - - ""github.com/cortexproject/cortex/pkg/util/flagext"" - -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - const contentType = ""application/x-protobuf"" -diff --git a/pkg/promtail/target.go b/pkg/promtail/target.go -index c329ee61d85c3..285795b663a33 100644 ---- a/pkg/promtail/target.go -+++ b/pkg/promtail/target.go -@@ -13,7 +13,7 @@ import ( - ""github.com/prometheus/common/model"" - fsnotify ""gopkg.in/fsnotify.v1"" - -- ""github.com/grafana/tempo/pkg/helpers"" -+ ""github.com/grafana/loki/pkg/helpers"" - ) - - var ( -diff --git a/pkg/promtail/targetmanager.go b/pkg/promtail/targetmanager.go -index 012fc1723995b..83dce302dab95 100644 ---- a/pkg/promtail/targetmanager.go -+++ b/pkg/promtail/targetmanager.go -@@ -7,7 +7,7 @@ import ( - - ""github.com/go-kit/kit/log"" - ""github.com/go-kit/kit/log/level"" -- ""github.com/grafana/tempo/pkg/helpers"" -+ ""github.com/grafana/loki/pkg/helpers"" - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/config"" - ""github.com/prometheus/prometheus/discovery"" -diff --git a/pkg/querier/http.go b/pkg/querier/http.go -index 4a1f218d74c80..7f1d9ee3a1f8f 100644 ---- a/pkg/querier/http.go -+++ b/pkg/querier/http.go -@@ -11,7 +11,7 @@ import ( - ""time"" - - ""github.com/gorilla/mux"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - const ( -diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go -index 2f323484f812b..8a4c65f5f24cf 100644 ---- a/pkg/querier/querier.go -+++ b/pkg/querier/querier.go -@@ -9,10 +9,10 @@ import ( - ""github.com/cortexproject/cortex/pkg/util"" - ""google.golang.org/grpc/health/grpc_health_v1"" - -- ""github.com/grafana/tempo/pkg/helpers"" -- ""github.com/grafana/tempo/pkg/ingester/client"" -- ""github.com/grafana/tempo/pkg/iter"" -- ""github.com/grafana/tempo/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/helpers"" -+ ""github.com/grafana/loki/pkg/ingester/client"" -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" - ) - - // Config for a querier.",unknown,"Rename Tempo to Loki. (#36) - -* Rename Tempo to Loki. - -Signed-off-by: Tom Wilkie - -* Use new build image, don't delete generated files on clean. - -Signed-off-by: Tom Wilkie " -9688f83954cd1a2515166d93acc4e4621e7a2666,2023-03-17 03:10:10,Hervé Nicol,"helm chart: fix role/PSP mapping (#8656) - -**What this PR does / why we need it**: - -Fixes role/PSP and role/SCC mapping in Helm chart. 
- -**Which issue(s) this PR fixes**: -Fixes #8652 - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md` - -Co-authored-by: Herve Nicol <12008875+hervenicol@users.noreply.github.com>",False,"diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md -index 31e9c10f9ca45..718a8891eb597 100644 ---- a/production/helm/loki/CHANGELOG.md -+++ b/production/helm/loki/CHANGELOG.md -@@ -13,11 +13,12 @@ Entries should include a reference to the pull request that introduced the chang - - [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) - -+- [BUGFIX] Fix role/PSP mapping -+ - ## 4.8.0 - - - [CHANGE] Changed version of Grafana Enterprise Logs to v1.6.2 - -- - ## 4.7 - - - [CHANGE] **BREAKING** Rename `gel-license.jwt` property of secret `gel-secrets` to `license.jwt` on enterprise-logs chart. -diff --git a/production/helm/loki/templates/role.yaml b/production/helm/loki/templates/role.yaml -index 768dd39b56b3b..621b4bee79bf3 100644 ---- a/production/helm/loki/templates/role.yaml -+++ b/production/helm/loki/templates/role.yaml -@@ -15,7 +15,7 @@ rules: - verbs: - - use - resourceNames: -- - {{ include ""loki.fullname"" . }} -+ - {{ include ""loki.name"" . }} - {{- end }} - {{- if .Values.rbac.sccEnabled }} - rules: -@@ -26,6 +26,6 @@ rules: - verbs: - - use - resourceNames: -- - {{ include ""loki.fullname"" . }} -+ - {{ include ""loki.name"" . }} - {{- end }} - {{- end }}",unknown,"helm chart: fix role/PSP mapping (#8656) - -**What this PR does / why we need it**: - -Fixes role/PSP and role/SCC mapping in Helm chart. 
- -**Which issue(s) this PR fixes**: -Fixes #8652 - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md` - -Co-authored-by: Herve Nicol <12008875+hervenicol@users.noreply.github.com>" -c922e947082bf92ea35dfe17eec30ba96bd00756,2018-04-16 14:52:02,Tom Wilkie,Boilerplate for ingester.,False,"diff --git a/cmd/distributor/main.go b/cmd/distributor/main.go -index 20db52e8b44cc..bbcceea9f7af2 100644 ---- a/cmd/distributor/main.go -+++ b/cmd/distributor/main.go -@@ -25,7 +25,7 @@ func main() { - ringConfig ring.Config - distributorConfig distributor.Config - ) -- util.RegisterFlags(&distributorConfig) -+ util.RegisterFlags(&serverConfig, &ringConfig, &distributorConfig) - flag.Parse() - - r, err := ring.New(ringConfig) -diff --git a/cmd/ingester/Dockerfile b/cmd/ingester/Dockerfile -new file mode 100644 -index 0000000000000..6db61cddf0f44 ---- /dev/null -+++ b/cmd/ingester/Dockerfile -@@ -0,0 +1,4 @@ -+FROM alpine:3.4 -+COPY ingester /bin/ingester -+EXPOSE 80 -+ENTRYPOINT [ ""/bin/ingester"" ] -diff --git a/cmd/ingester/main.go b/cmd/ingester/main.go -index 79058077776c2..1e4c760eeac32 100644 ---- a/cmd/ingester/main.go -+++ b/cmd/ingester/main.go -@@ -1,5 +1,50 @@ - package main - -+import ( -+ ""flag"" -+ -+ log ""github.com/sirupsen/logrus"" -+ ""github.com/weaveworks/common/middleware"" -+ ""github.com/weaveworks/common/server"" -+ ""github.com/weaveworks/cortex/pkg/ring"" -+ ""github.com/weaveworks/cortex/pkg/util"" -+ ""google.golang.org/grpc"" -+ -+ ""github.com/grafana/logish/pkg/ingester"" -+ ""github.com/grafana/logish/pkg/logproto"" -+) -+ - func main() { -+ var ( -+ serverConfig = server.Config{ -+ MetricsNamespace: ""logish"", -+ GRPCMiddleware: []grpc.UnaryServerInterceptor{ -+ middleware.ServerUserHeaderInterceptor, -+ }, -+ } -+ ringConfig ring.Config -+ ingesterConfig ingester.Config -+ ) -+ util.RegisterFlags(&serverConfig, &ringConfig, &ingesterConfig) -+ flag.Parse() -+ -+ r, err := ring.New(ringConfig) -+ if err != nil { -+ log.Fatalf(""Error initializing ring: %v"", err) -+ } -+ defer r.Stop() -+ -+ ingester, err := ingester.New(ingesterConfig, r) -+ if err != nil { -+ log.Fatalf(""Error initializing ingester: %v"", err) -+ } -+ -+ server, err := server.New(serverConfig) -+ if err != nil { -+ log.Fatalf(""Error initializing server: %v"", err) -+ } -+ defer server.Shutdown() - -+ logproto.RegisterAggregatorServer(server.GRPC, ingester) -+ server.Run() - } -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index fefd3d2f3a2cb..bca1169349a0f 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -1,14 +1,68 @@ - package ingester - -+import ( -+ ""context"" -+ ""flag"" -+ ""sync"" -+ -+ ""github.com/weaveworks/common/user"" -+ ""github.com/weaveworks/cortex/pkg/ring"" -+ ""google.golang.org/grpc/health/grpc_health_v1"" -+ -+ ""github.com/grafana/logish/pkg/logproto"" -+) -+ - type Config struct { - } - -+func (cfg *Config) RegisterFlags(f *flag.FlagSet) { -+} -+ - type Ingester struct { - cfg Config -+ r ring.ReadRing -+ -+ instancesMtx sync.RWMutex -+ instances map[string]*instance - } - --func New(cfg Config) (*Ingester, error) { -+func New(cfg Config, r ring.ReadRing) (*Ingester, error) { - return &Ingester{ -- cfg: cfg, 
-+ cfg: cfg, -+ r: r, -+ instances: map[string]*instance{}, - }, nil - } -+ -+func (i *Ingester) Push(ctx context.Context, req *logproto.WriteRequest) (*logproto.WriteResponse, error) { -+ instanceID, err := user.ExtractOrgID(ctx) -+ if err != nil { -+ return nil, err -+ } -+ -+ instance := i.getOrCreateInstance(instanceID) -+ err = instance.Push(ctx, req) -+ return &logproto.WriteResponse{}, err -+} -+ -+func (i *Ingester) getOrCreateInstance(instanceID string) *instance { -+ i.instancesMtx.RLock() -+ inst, ok := i.instances[instanceID] -+ i.instancesMtx.RUnlock() -+ if ok { -+ return inst -+ } -+ -+ i.instancesMtx.Lock() -+ defer i.instancesMtx.Unlock() -+ inst, ok = i.instances[instanceID] -+ if !ok { -+ inst = &instance{} -+ i.instances[instanceID] = inst -+ } -+ return inst -+} -+ -+func (*Ingester) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { -+ return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil -+} -diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go -new file mode 100644 -index 0000000000000..b774280830691 ---- /dev/null -+++ b/pkg/ingester/instance.go -@@ -0,0 +1,14 @@ -+package ingester -+ -+import ( -+ ""context"" -+ -+ ""github.com/grafana/logish/pkg/logproto"" -+) -+ -+type instance struct { -+} -+ -+func (i *instance) Push(ctx context.Context, req *logproto.WriteRequest) error { -+ return nil -+}",unknown,Boilerplate for ingester. -0f43bf6958a1f347361c144d2f26e1e2951af7ba,2024-03-22 01:20:27,Owen Diehl,fix(blooms): fix typo in pool decl (#12302),False,"diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go -index 9c7a6f965c228..e6ad69a248fed 100644 ---- a/pkg/storage/bloom/v1/util.go -+++ b/pkg/storage/bloom/v1/util.go -@@ -35,7 +35,7 @@ var ( - // 4KB -> 64MB - BlockPool = BytePool{ - pool: pool.New( -- 4<<10, 64<<24, 4, -+ 4<<10, 64<<20, 4, - func(size int) interface{} { - return make([]byte, size) - }),",fix,fix typo in pool decl (#12302) -51bdd152374a2c2c9414181c118cc82f3dec6c1d,2019-06-01 03:18:39,Edward Welch,"renaming `metric` stage to `metrics` as it defines multiple metrics, similar to labels stage which is also plural. 
-Adding a couple unit tests to regex and json stage to act as examples",False,"diff --git a/pkg/logentry/stages/json_test.go b/pkg/logentry/stages/json_test.go -index 772a506c191ce..55a4e19d7a485 100644 ---- a/pkg/logentry/stages/json_test.go -+++ b/pkg/logentry/stages/json_test.go -@@ -7,11 +7,54 @@ import ( - - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/pkg/errors"" -+ ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/common/model"" - ""github.com/stretchr/testify/assert"" - ""gopkg.in/yaml.v2"" - ) - -+var testJSONYaml = ` -+pipeline_stages: -+- json: -+ expressions: -+ out: message -+ app: -+ nested: -+ duration: -+` -+ -+var testJSONLogLine = ` -+{ -+ ""time"":""2012-11-01T22:08:41+00:00"", -+ ""app"":""loki"", -+ ""component"": [""parser"",""type""], -+ ""level"" : ""WARN"", -+ ""nested"" : {""child"":""value""}, -+ ""duration"" : 125, -+ ""message"" : ""this is a log line"" -+} -+` -+ -+func TestPipeline_JSON(t *testing.T) { -+ expected := map[string]interface{}{ -+ ""out"": ""this is a log line"", -+ ""app"": ""loki"", -+ ""nested"": ""{\""child\"":\""value\""}"", -+ ""duration"": float64(125), -+ } -+ -+ pl, err := NewPipeline(util.Logger, loadConfig(testJSONYaml), nil, prometheus.DefaultRegisterer) -+ if err != nil { -+ t.Fatal(err) -+ } -+ lbls := model.LabelSet{} -+ ts := time.Now() -+ entry := testJSONLogLine -+ extracted := map[string]interface{}{} -+ pl.Process(lbls, extracted, &ts, &entry) -+ assert.Equal(t, expected, extracted) -+} -+ - var cfg = `json: - expressions: - key1: expression1 -diff --git a/pkg/logentry/stages/metrics_test.go b/pkg/logentry/stages/metrics_test.go -index b404611ea6001..9eb3c38fa98ce 100644 ---- a/pkg/logentry/stages/metrics_test.go -+++ b/pkg/logentry/stages/metrics_test.go -@@ -19,7 +19,7 @@ pipeline_stages: - - json: - expressions: - app: app --- metric: -+- metrics: - loki_count: - type: Counter - description: uhhhhhhh -diff --git a/pkg/logentry/stages/regex_test.go b/pkg/logentry/stages/regex_test.go -index 830bc0159042c..be65fc215777b 100644 ---- a/pkg/logentry/stages/regex_test.go -+++ b/pkg/logentry/stages/regex_test.go -@@ -7,11 +7,47 @@ import ( - - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/pkg/errors"" -+ ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/common/model"" - ""github.com/stretchr/testify/assert"" - ""gopkg.in/yaml.v2"" - ) - -+var testRegexYaml = ` -+pipeline_stages: -+- regex: -+ expression: ""^(?P\\S+) (?P\\S+) (?P\\S+) \\[(?P[\\w:/]+\\s[+\\-]\\d{4})\\] \""(?P\\S+)\\s?(?P\\S+)?\\s?(?P\\S+)?\"" (?P\\d{3}|-) (?P\\d+|-)\\s?\""?(?P[^\""]*)\""?\\s?\""?(?P[^\""]*)?\""?$"" -+` -+ -+var testRegexLogLine = `11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] ""GET /1986.js HTTP/1.1"" 200 932 ""-"" ""Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6""` -+ -+func TestPipeline_Regex(t *testing.T) { -+ expected := map[string]interface{}{ -+ ""ip"": ""11.11.11.11"", -+ ""identd"": ""-"", -+ ""user"": ""frank"", -+ ""timestamp"": ""25/Jan/2000:14:00:01 -0500"", -+ ""action"": ""GET"", -+ ""path"": ""/1986.js"", -+ ""protocol"": ""HTTP/1.1"", -+ ""status"": ""200"", -+ ""size"": ""932"", -+ ""referer"": ""-"", -+ ""useragent"": ""Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"", -+ } -+ -+ pl, err := NewPipeline(util.Logger, loadConfig(testRegexYaml), nil, prometheus.DefaultRegisterer) -+ if err != nil { -+ t.Fatal(err) -+ } -+ lbls := model.LabelSet{} -+ ts := 
time.Now() -+ entry := testRegexLogLine -+ extracted := map[string]interface{}{} -+ pl.Process(lbls, extracted, &ts, &entry) -+ assert.Equal(t, expected, extracted) -+} -+ - var regexCfg = `regex: - expression: ""regexexpression""` - -diff --git a/pkg/logentry/stages/stage.go b/pkg/logentry/stages/stage.go -index f5f2d7f52b163..44db7f06628d2 100644 ---- a/pkg/logentry/stages/stage.go -+++ b/pkg/logentry/stages/stage.go -@@ -12,7 +12,7 @@ import ( - const ( - StageTypeJSON = ""json"" - StageTypeRegex = ""regex"" -- StageTypeMetric = ""metric"" -+ StageTypeMetric = ""metrics"" - StageTypeLabel = ""labels"" - StageTypeTimestamp = ""timestamp"" - StageTypeOutput = ""output""",unknown,"renaming `metric` stage to `metrics` as it defines multiple metrics, similar to labels stage which is also plural. -Adding a couple unit tests to regex and json stage to act as examples" -59a0fb3fc940dbbd05a35b598cf4358c8d93b15b,2023-10-19 18:36:25,Karsten Jeschkies,Install dlv 1.21.1 on Docker Compose dev env. (#10953),False,"diff --git a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml b/tools/dev/loki-boltdb-storage-s3/config/loki.yaml -index eb012f8ba5cb2..be676796d4729 100644 ---- a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml -+++ b/tools/dev/loki-boltdb-storage-s3/config/loki.yaml -@@ -31,7 +31,6 @@ frontend: - frontend_worker: - grpc_client_config: - max_send_msg_size: 1.048576e+08 -- parallelism: 6 - scheduler_address: query-scheduler:9009 - ingester: - chunk_block_size: 262144 -@@ -55,7 +54,6 @@ ingester: - http_client_timeout: 20s - store: consul - replication_factor: 3 -- max_transfer_retries: 0 - sync_min_utilization: 0.2 - sync_period: 15m - wal: -@@ -90,6 +88,7 @@ querier: - query_ingesters_within: 2h - multi_tenant_queries_enabled: true - per_request_limits_enabled: true -+ max_concurrent: 6 - query_range: - align_queries_with_step: true - cache_results: true -diff --git a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile -index 38c690f03ef7c..3b8912b4120ab 100644 ---- a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile -+++ b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile -@@ -1,6 +1,6 @@ - FROM golang:1.20.4 - ENV CGO_ENABLED=0 --RUN go install github.com/go-delve/delve/cmd/dlv@v1.20.2 -+RUN go install github.com/go-delve/delve/cmd/dlv@v1.21.1 - - FROM alpine:3.18.3",unknown,Install dlv 1.21.1 on Docker Compose dev env. (#10953) -3d579fb3291db58491a1e32a78e7ea8e3f2e4d0a,2022-04-18 20:58:56,Kaviraj Kanagaraj,chore: Fix flaky test with `TestSingleIdx`. 
(#5947),False,"diff --git a/pkg/storage/stores/tsdb/single_file_index_test.go b/pkg/storage/stores/tsdb/single_file_index_test.go -index d9b49fd4be00e..f5bd7988b91f6 100644 ---- a/pkg/storage/stores/tsdb/single_file_index_test.go -+++ b/pkg/storage/stores/tsdb/single_file_index_test.go -@@ -2,6 +2,7 @@ package tsdb - - import ( - ""context"" -+ ""sort"" - ""testing"" - - ""github.com/go-kit/log"" -@@ -172,6 +173,7 @@ func TestSingleIdx(t *testing.T) { - // request data at the end of the tsdb range, but it should return all labels present - ls, err := idx.LabelNames(context.Background(), ""fake"", 9, 10) - require.Nil(t, err) -+ sort.Strings(ls) - require.Equal(t, []string{""bazz"", ""bonk"", ""foo""}, ls) - }) - -@@ -179,12 +181,14 @@ func TestSingleIdx(t *testing.T) { - // request data at the end of the tsdb range, but it should return all labels present - ls, err := idx.LabelNames(context.Background(), ""fake"", 9, 10, labels.MustNewMatcher(labels.MatchEqual, ""bazz"", ""buzz"")) - require.Nil(t, err) -+ sort.Strings(ls) - require.Equal(t, []string{""bazz"", ""foo""}, ls) - }) - - t.Run(""LabelValues"", func(t *testing.T) { - vs, err := idx.LabelValues(context.Background(), ""fake"", 9, 10, ""foo"") - require.Nil(t, err) -+ sort.Strings(vs) - require.Equal(t, []string{""bar"", ""bard""}, vs) - })",chore,Fix flaky test with `TestSingleIdx`. (#5947) -98a2daa292cd1126df8f5a94837e310783b00634,2023-02-03 22:19:11,Borna Zeba,"docs(monitor-and-alert): Fix typo in ServiceMonitor spelling (#7693) - -Co-authored-by: Karsten Jeschkies ",False,"diff --git a/docs/sources/installation/helm/monitor-and-alert/index.md b/docs/sources/installation/helm/monitor-and-alert/index.md -index ee6f3150863d8..3c8a2504c9323 100644 ---- a/docs/sources/installation/helm/monitor-and-alert/index.md -+++ b/docs/sources/installation/helm/monitor-and-alert/index.md -@@ -15,7 +15,7 @@ keywords: - - By default this Helm Chart configures meta-monitoring of metrics (service monitoring) and logs (self monitoring). - --The `ServiceMonitor` resource works with either the Prometheus Operator or the Grafana Agent Operator, and defines how Loki's metrics should be scraped. Scraping this Loki cluster using the scrape config defined in the `SerivceMonitor` resource is required for the included dashboards to work. A `MetricsInstance` can be configured to write the metrics to a remote Prometheus instance such as Grafana Cloud Metrics. -+The `ServiceMonitor` resource works with either the Prometheus Operator or the Grafana Agent Operator, and defines how Loki's metrics should be scraped. Scraping this Loki cluster using the scrape config defined in the `ServiceMonitor` resource is required for the included dashboards to work. A `MetricsInstance` can be configured to write the metrics to a remote Prometheus instance such as Grafana Cloud Metrics. - - _Self monitoring_ is enabled by default. This will deploy a `GrafanaAgent`, `LogsInstance`, and `PodLogs` resource which will instruct the Grafana Agent Operator (installed seperately) on how to scrape this Loki cluster's logs and send them back to itself. 
Scraping this Loki cluster using the scrape config defined in the `PodLogs` resource is required for the included dashboards to work.",docs,"Fix typo in ServiceMonitor spelling (#7693) - -Co-authored-by: Karsten Jeschkies " -b611641fea42c29d4dc5849c0ea34e63868d70d6,2023-06-22 16:00:32,Susana Ferreira,"Add comments to clarify Grouping struct in Aggregation expressions (#9761) - -**What this PR does / why we need it**: -Add comments to clarify the Grouping struct in Vector Aggregation and -Range Vector Aggregations expressions. - -Related with PR: https://github.com/grafana/loki/pull/9515 - -**Checklist** -- [ ] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)",False,"diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go -index 801efa9117527..7348e4af7a180 100644 ---- a/pkg/logql/syntax/ast.go -+++ b/pkg/logql/syntax/ast.go -@@ -993,6 +993,8 @@ type SampleExpr interface { - Expr - } - -+// RangeAggregationExpr not all range vector aggregation expressions support grouping by/without label(s), -+// therefore the Grouping struct can be nil. - type RangeAggregationExpr struct { - Left *LogRange - Operation string -@@ -1117,6 +1119,13 @@ func (e *RangeAggregationExpr) Walk(f WalkFn) { - e.Left.Walk(f) - } - -+// Grouping struct represents the grouping by/without label(s) for vector aggregators and range vector aggregators. -+// The representation is as follows: -+// - No Grouping (labels dismissed): () => Grouping{Without: false, Groups: nil} -+// - Grouping by empty label set: by () () => Grouping{Without: false, Groups: []} -+// - Grouping by label set: by () () => Grouping{Without: false, Groups: []} -+// - Grouping without empty label set: without () () => Grouping{Without: true, Groups: []} -+// - Grouping without label set: without () () => Grouping{Without: true, Groups: []} - type Grouping struct { - Groups []string - Without bool -@@ -1148,6 +1157,8 @@ func (g Grouping) String() string { - return sb.String() - } - -+// VectorAggregationExpr all vector aggregation expressions support grouping by/without label(s), -+// therefore the Grouping struct can never be nil. - type VectorAggregationExpr struct { - Left SampleExpr",unknown,"Add comments to clarify Grouping struct in Aggregation expressions (#9761) - -**What this PR does / why we need it**: -Add comments to clarify the Grouping struct in Vector Aggregation and -Range Vector Aggregations expressions. 
- -Related with PR: https://github.com/grafana/loki/pull/9515 - -**Checklist** -- [ ] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)" -44ae10eaebfa1b310a8a40f7e915f161f5fe7e7c,2023-07-24 14:53:33,Michel Hollands,"Add 2.8.3 release to changelog (#10014) - -**What this PR does / why we need it**: -Add the PRs of the 2.8.3 release to the changelog - ---------- - -Signed-off-by: Michel Hollands ",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index d4e39c9f6db0d..a4e1ffe0e5a3c 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -39,7 +39,6 @@ - * [9431](https://github.com/grafana/loki/pull/9431) **dannykopping**: Add more buckets to `loki_memcache_request_duration_seconds` metric; latencies can increase if using memcached with NVMe - * [8684](https://github.com/grafana/loki/pull/8684) **oleksii-boiko-ua**: Helm: Add hpa templates for read, write and backend components. - * [9535](https://github.com/grafana/loki/pull/9535) **salvacorts** Index stats cache can be configured independently of the results cache. If it's not configured, but it's enabled, it will use the results cache configuration. --* [9604](https://github.com/grafana/loki/pull/9604) **dannykopping**: Querier: configurable writeback queue bytes size - * [9626](https://github.com/grafana/loki/pull/9626) **ashwanthgoli** logfmt: add --strict flag to enable strict parsing, perform nostrict parsing by default - * [9672](https://github.com/grafana/loki/pull/9672) **zeitlinger**: Add `alignLeft` and `alignRight` line formatting functions. - * [9693](https://github.com/grafana/loki/pull/9693) **salvacorts** Add `keep` stage to LogQL. -@@ -56,10 +55,8 @@ - * [9252](https://github.com/grafana/loki/pull/9252) **jeschkies**: Use un-escaped regex literal for string matching. - * [9176](https://github.com/grafana/loki/pull/9176) **DylanGuedes**: Fix incorrect association of per-stream rate limit when sharding is enabled. - * [9463](https://github.com/grafana/loki/pull/9463) **Totalus**: Fix OpenStack Swift client object listing to fetch all the objects properly. --* [9471](https://github.com/grafana/loki/pull/9471) **sandeepsukhani**: query-scheduler: fix query distribution in SSD mode. - * [9495](https://github.com/grafana/loki/pull/9495) **thampiotr**: Promtail: Fix potential goroutine leak in file tailer. - * [9650](https://github.com/grafana/loki/pull/9650) **ashwanthgoli**: Config: ensure storage config defaults apply to named stores. --* [9629](https://github.com/grafana/loki/pull/9629) **periklis**: Fix duplicate label values from ingester streams. - * [9757](https://github.com/grafana/loki/pull/9757) **sandeepsukhani**: Frontend Caching: Fix a bug in negative logs results cache causing Loki to unexpectedly send empty/incorrect results. 
- * [9754](https://github.com/grafana/loki/pull/9754) **ashwanthgoli**: Fixes an issue with indexes becoming unqueriable if the index prefix is different from the one configured in the latest period config. - * [9763](https://github.com/grafana/loki/pull/9763) **ssncferreira**: Fix the logic of the `offset` operator for downstream queries on instant query splitting of (range) vector aggregation expressions containing an offset. -@@ -82,8 +79,6 @@ - - ##### Fixes - --* [8988](https://github.com/grafana/loki/pull/8988) **darxriggs**: Promtail: Prevent logging errors on normal shutdown. --* [9155](https://github.com/grafana/loki/pull/9155) **farodin91**: Promtail: Break on iterate journal failure. - * [8987](https://github.com/grafana/loki/pull/8987) **darxriggs**: Promtail: Fix file descriptor leak. - * [9863](https://github.com/grafana/loki/pull/9863) **ashwanthgoli**: Promtail: Apply defaults to HTTP client config. This ensures follow_redirects is set to true. - * [9915](https://github.com/grafana/loki/pull/9915) **frittentheke**: Promtail: Update grafana/tail to address issue in retry logic -@@ -116,6 +111,30 @@ - * [8880](https://github.com/grafana/loki/pull/8880) **JoaoBraveCoding**: Normalize headless service name for query-frontend/scheduler - * [9978](https://github.com/grafana/loki/pull/9978) ****vlad-diachenko****: replaced deprecated `policy.v1beta1` with `policy.v1`. - -+## 2.8.3 (2023-07-21) -+ -+#### Loki -+ -+##### Security -+ -+* [9913](https://github.com/grafana/loki/pull/9913) **MichelHollands**: Upgrade go version to 1.20.6 -+ -+##### Enhancements -+ -+* [9604](https://github.com/grafana/loki/pull/9604) **dannykopping**: Querier: configurable writeback queue bytes size -+ -+##### Fixes -+ -+* [9471](https://github.com/grafana/loki/pull/9471) **sandeepsukhani**: query-scheduler: fix query distribution in SSD mode. -+* [9629](https://github.com/grafana/loki/pull/9629) **periklis**: Fix duplicate label values from ingester streams. -+ -+#### Promtail -+ -+##### Fixes -+ -+* [9155](https://github.com/grafana/loki/pull/9155) **farodin91**: Promtail: Break on iterate journal failure. -+* [8988](https://github.com/grafana/loki/pull/8988) **darxriggs**: Promtail: Prevent logging errors on normal shutdown. -+ - ## 2.8.2 (2023-05-03) - - #### Loki",unknown,"Add 2.8.3 release to changelog (#10014) - -**What this PR does / why we need it**: -Add the PRs of the 2.8.3 release to the changelog - ---------- - -Signed-off-by: Michel Hollands " -531905c06641ea7515faa32e00749d1a5659a1ff,2022-01-28 23:09:04,Kaviraj Kanagaraj,"Remove `cortex` references from `go.mod` and makefile (#5269) - -* Remove cortex references from `go.mod` and makefile - -Signed-off-by: Kaviraj - -* Remove from `modules.txt` - -Signed-off-by: Kaviraj ",False,"diff --git a/Makefile b/Makefile -index 777c2cbb89baf..8e3e2c84f5f19 100644 ---- a/Makefile -+++ b/Makefile -@@ -269,10 +269,6 @@ lint: - GO111MODULE=on GOGC=10 golangci-lint run -v $(GOLANGCI_ARG) - faillint -paths ""sync/atomic=go.uber.org/atomic"" ./... - -- # Ensure packages imported by downstream projects (eg. GEM) don't depend on other packages -- # vendoring Cortex's cortexpb (to avoid conflicting imports in downstream projects). -- faillint -paths ""github.com/grafana/loki/pkg/util/server/...,github.com/grafana/loki/pkg/storage/...,github.com/cortexproject/cortex/pkg/cortexpb"" ./pkg/logql/... 
-- - ######## - # Test # - ######## -diff --git a/go.mod b/go.mod -index 52a155ffc0e09..c8157e32978b8 100644 ---- a/go.mod -+++ b/go.mod -@@ -141,7 +141,6 @@ require ( - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect -- github.com/cortexproject/cortex v1.10.1-0.20220110092510-e0807c4eb487 // indirect - github.com/dennwc/varint v1.0.0 // indirect - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/digitalocean/godo v1.73.0 // indirect -diff --git a/go.sum b/go.sum -index e5980403bd583..9e0ac353e40b8 100644 ---- a/go.sum -+++ b/go.sum -@@ -475,9 +475,8 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV - github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -+github.com/cortexproject/cortex v1.10.1-0.20211124141505-4e9fc3a2b5ab h1:THN4VQQqsZn5gNwcmQJO1GarnfZkSWfp5824ifoD9fQ= - github.com/cortexproject/cortex v1.10.1-0.20211124141505-4e9fc3a2b5ab/go.mod h1:njSBkQ1wUNx9X4knV/j65Pi4ItlJXX4QwXRKoMflJd8= --github.com/cortexproject/cortex v1.10.1-0.20220110092510-e0807c4eb487 h1:P8B9MSfGN24zxs3c/r+s4waxRjheP8VL8tVWOHvbraY= --github.com/cortexproject/cortex v1.10.1-0.20220110092510-e0807c4eb487/go.mod h1:pdpYEJZNU+Nem+BhEB/9Ml5Btushrs3hkWYbABvxkgU= - github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= - github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= - github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 9f4da09d3304c..218df25bae387 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -246,8 +246,6 @@ github.com/coreos/go-systemd/v22/journal - # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f - ## explicit - github.com/coreos/pkg/capnslog --# github.com/cortexproject/cortex v1.10.1-0.20220110092510-e0807c4eb487 --## explicit; go 1.16 - # github.com/cristalhq/hedgedhttp v0.7.0 - ## explicit; go 1.16 - github.com/cristalhq/hedgedhttp",unknown,"Remove `cortex` references from `go.mod` and makefile (#5269) - -* Remove cortex references from `go.mod` and makefile - -Signed-off-by: Kaviraj - -* Remove from `modules.txt` - -Signed-off-by: Kaviraj " -b7d23f41bddf715640d6b014cfa78e7244a8ddfd,2020-01-29 22:19:13,Owen Diehl,"Decouple logql engine/AST from execution context (#1605) - -* logql engine is an interface - -* [wip] begins agnostic logql evaluator work - -* decouples logql AST from execution context - -* healthcheck comments",False,"diff --git a/pkg/logql/ast.go b/pkg/logql/ast.go -index c503dd5c7a03b..de0821432d5cb 100644 ---- a/pkg/logql/ast.go -+++ b/pkg/logql/ast.go -@@ -3,6 +3,7 @@ package logql - import ( - ""bytes"" - ""context"" -+ ""errors"" - ""fmt"" - ""regexp"" - ""strconv"" -@@ -212,30 +213,46 @@ const ( - type SampleExpr interface { - // Selector is the LogQL selector to apply when retrieving logs. 
- Selector() LogSelectorExpr -- // Evaluator returns a `StepEvaluator` that can evaluate the expression step by step -- Evaluator() StepEvaluator -- // Close all resources used. -- Close() error - } - - // StepEvaluator evaluate a single step of a query. - type StepEvaluator interface { - Next() (bool, int64, promql.Vector) -+ // Close all resources used. -+ Close() error -+} -+ -+type stepEvaluator struct { -+ fn func() (bool, int64, promql.Vector) -+ close func() error - } - --// StepEvaluatorFn is a function to chain multiple `StepEvaluator`. --type StepEvaluatorFn func() (bool, int64, promql.Vector) -+func newStepEvaluator(fn func() (bool, int64, promql.Vector), close func() error) (StepEvaluator, error) { -+ if fn == nil { -+ return nil, errors.New(""nil step evaluator fn"") -+ } - --// Next implements `StepEvaluator` --func (s StepEvaluatorFn) Next() (bool, int64, promql.Vector) { -- return s() -+ if close == nil { -+ close = func() error { return nil } -+ } -+ -+ return &stepEvaluator{ -+ fn: fn, -+ close: close, -+ }, nil -+} -+ -+func (e *stepEvaluator) Next() (bool, int64, promql.Vector) { -+ return e.fn() -+} -+ -+func (e *stepEvaluator) Close() error { -+ return e.close() - } - - type rangeAggregationExpr struct { - left *logRange - operation string -- -- iterator RangeVectorIterator - } - - func newRangeAggregationExpr(left *logRange, operation string) SampleExpr { -@@ -245,13 +262,6 @@ func newRangeAggregationExpr(left *logRange, operation string) SampleExpr { - } - } - --func (e *rangeAggregationExpr) Close() error { -- if e.iterator == nil { -- return nil -- } -- return e.iterator.Close() --} -- - func (e *rangeAggregationExpr) Selector() LogSelectorExpr { - return e.left.left - } -@@ -296,10 +306,6 @@ func mustNewVectorAggregationExpr(left SampleExpr, operation string, gr *groupin - } - } - --func (v *vectorAggregationExpr) Close() error { -- return v.left.Close() --} -- - func (v *vectorAggregationExpr) Selector() LogSelectorExpr { - return v.left.Selector() - } -diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go -index 113ef5111f1de..e1d457a0f0fb0 100644 ---- a/pkg/logql/engine.go -+++ b/pkg/logql/engine.go -@@ -1,9 +1,7 @@ - package logql - - import ( -- ""container/heap"" - ""context"" -- ""math"" - ""sort"" - ""time"" - -@@ -12,7 +10,6 @@ import ( - ""github.com/grafana/loki/pkg/iter"" - ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/logql/stats"" -- ""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/client_golang/prometheus/promauto"" - ""github.com/prometheus/prometheus/pkg/labels"" -@@ -60,18 +57,32 @@ func (opts *EngineOpts) applyDefault() { - } - } - --// Engine is the LogQL engine. --type Engine struct { -- timeout time.Duration -- maxLookBackPeriod time.Duration -+// Engine interface used to construct queries -+type Engine interface { -+ NewRangeQuery(qs string, start, end time.Time, step time.Duration, direction logproto.Direction, limit uint32) Query -+ NewInstantQuery(qs string, ts time.Time, direction logproto.Direction, limit uint32) Query -+} -+ -+// engine is the LogQL engine. -+type engine struct { -+ timeout time.Duration -+ evaluator Evaluator - } - - // NewEngine creates a new LogQL engine. 
--func NewEngine(opts EngineOpts) *Engine { -+func NewEngine(opts EngineOpts, q Querier) Engine { -+ if q == nil { -+ panic(""nil Querier"") -+ } -+ - opts.applyDefault() -- return &Engine{ -- timeout: opts.Timeout, -- maxLookBackPeriod: opts.MaxLookBackPeriod, -+ -+ return &engine{ -+ timeout: opts.Timeout, -+ evaluator: &defaultEvaluator{ -+ querier: q, -+ maxLookBackPeriod: opts.MaxLookBackPeriod, -+ }, - } - } - -@@ -82,24 +93,16 @@ type Query interface { - } - - type query struct { -- querier Querier -- qs string -- start, end time.Time -- step time.Duration -- direction logproto.Direction -- limit uint32 -- -- ng *Engine --} -+ LiteralParams - --func (q *query) isInstant() bool { -- return q.start == q.end && q.step == 0 -+ ng *engine - } - - // Exec Implements `Query` - func (q *query) Exec(ctx context.Context) (promql.Value, error) { - var queryType string -- if q.isInstant() { -+ -+ if IsInstant(q) { - queryType = ""instant"" - } else { - queryType = ""range"" -@@ -110,55 +113,57 @@ func (q *query) Exec(ctx context.Context) (promql.Value, error) { - } - - // NewRangeQuery creates a new LogQL range query. --func (ng *Engine) NewRangeQuery( -- q Querier, -+func (ng *engine) NewRangeQuery( - qs string, - start, end time.Time, step time.Duration, - direction logproto.Direction, limit uint32) Query { - return &query{ -- querier: q, -- qs: qs, -- start: start, -- end: end, -- step: step, -- direction: direction, -- limit: limit, -- ng: ng, -+ LiteralParams: LiteralParams{ -+ qs: qs, -+ start: start, -+ end: end, -+ step: step, -+ direction: direction, -+ limit: limit, -+ }, -+ ng: ng, - } - } - - // NewInstantQuery creates a new LogQL instant query. --func (ng *Engine) NewInstantQuery( -- q Querier, -+func (ng *engine) NewInstantQuery( - qs string, - ts time.Time, - direction logproto.Direction, limit uint32) Query { - return &query{ -- querier: q, -- qs: qs, -- start: ts, -- end: ts, -- step: 0, -- direction: direction, -- limit: limit, -- ng: ng, -+ LiteralParams: LiteralParams{ -+ qs: qs, -+ start: ts, -+ end: ts, -+ step: 0, -+ direction: direction, -+ limit: limit, -+ }, -+ ng: ng, - } - } - --func (ng *Engine) exec(ctx context.Context, q *query) (promql.Value, error) { -+func (ng *engine) exec(ctx context.Context, q *query) (promql.Value, error) { - log, ctx := spanlogger.New(ctx, ""Engine.exec"") - defer log.Finish() - ctx, cancel := context.WithTimeout(ctx, ng.timeout) - defer cancel() - -- if q.qs == ""1+1"" { -- if q.isInstant() { -+ qs := q.String() -+ // This is a legacy query used for health checking. Not the best practice, but it works. -+ if qs == ""1+1"" { -+ if IsInstant(q) { - return promql.Vector{}, nil - } - return promql.Matrix{}, nil - } - -- expr, err := ParseExpr(q.qs) -+ expr, err := ParseExpr(qs) - if err != nil { - return nil, err - } -@@ -172,26 +177,10 @@ func (ng *Engine) exec(ctx context.Context, q *query) (promql.Value, error) { - - switch e := expr.(type) { - case SampleExpr: -- if err := ng.setupIterators(ctx, e, q); err != nil { -- return nil, err -- } -- return ng.evalSample(e, q), nil -+ return ng.evalSample(ctx, e, q) - - case LogSelectorExpr: -- params := SelectParams{ -- QueryRequest: &logproto.QueryRequest{ -- Start: q.start, -- End: q.end, -- Limit: q.limit, -- Direction: q.direction, -- Selector: e.String(), -- }, -- } -- // instant query, we look back to find logs near the requested ts. 
-- if q.isInstant() { -- params.Start = params.Start.Add(-ng.maxLookBackPeriod) -- } -- iter, err := q.querier.Select(ctx, params) -+ iter, err := ng.evaluator.Iterator(ctx, e, q) - if err != nil { - return nil, err - } -@@ -202,44 +191,20 @@ func (ng *Engine) exec(ctx context.Context, q *query) (promql.Value, error) { - return nil, nil - } - --// setupIterators walk through the AST tree and build iterators required to eval samples. --func (ng *Engine) setupIterators(ctx context.Context, expr SampleExpr, q *query) error { -- if expr == nil { -- return nil -- } -- switch e := expr.(type) { -- case *vectorAggregationExpr: -- return ng.setupIterators(ctx, e.left, q) -- case *rangeAggregationExpr: -- iter, err := q.querier.Select(ctx, SelectParams{ -- &logproto.QueryRequest{ -- Start: q.start.Add(-e.left.interval), -- End: q.end, -- Limit: 0, -- Direction: logproto.FORWARD, -- Selector: e.Selector().String(), -- }, -- }) -- if err != nil { -- return err -- } -- e.iterator = newRangeVectorIterator(iter, e.left.interval.Nanoseconds(), q.step.Nanoseconds(), -- q.start.UnixNano(), q.end.UnixNano()) -- } -- return nil --} -- - // evalSample evaluate a sampleExpr --func (ng *Engine) evalSample(expr SampleExpr, q *query) promql.Value { -- defer helpers.LogError(""closing SampleExpr"", expr.Close) -+func (ng *engine) evalSample(ctx context.Context, expr SampleExpr, q *query) (promql.Value, error) { - -- stepEvaluator := expr.Evaluator() -+ stepEvaluator, err := ng.evaluator.Evaluator(ctx, expr, q) -+ defer helpers.LogError(""closing SampleExpr"", stepEvaluator.Close) -+ if err != nil { -+ return nil, err -+ } - seriesIndex := map[uint64]*promql.Series{} - - next, ts, vec := stepEvaluator.Next() -- if q.isInstant() { -+ if IsInstant(q) { - sort.Slice(vec, func(i, j int) bool { return labels.Compare(vec[i].Metric, vec[j].Metric) < 0 }) -- return vec -+ return vec, nil - } - for next { - for _, p := range vec { -@@ -270,7 +235,7 @@ func (ng *Engine) evalSample(expr SampleExpr, q *query) promql.Value { - } - result := promql.Matrix(series) - sort.Sort(result) -- return result -+ return result, nil - } - - func readStreams(i iter.EntryIterator, size uint32) (Streams, error) { -@@ -304,211 +269,6 @@ type groupedAggregation struct { - reverseHeap vectorByReverseValueHeap - } - --// Evaluator implements `SampleExpr` for a vectorAggregationExpr --// this is copied and adapted from Prometheus vector aggregation code. --func (v *vectorAggregationExpr) Evaluator() StepEvaluator { -- nextEvaluator := v.left.Evaluator() -- return StepEvaluatorFn(func() (bool, int64, promql.Vector) { -- next, ts, vec := nextEvaluator.Next() -- if !next { -- return false, 0, promql.Vector{} -- } -- result := map[uint64]*groupedAggregation{} -- if v.operation == OpTypeTopK || v.operation == OpTypeBottomK { -- if v.params < 1 { -- return next, ts, promql.Vector{} -- } -- -- } -- for _, s := range vec { -- metric := s.Metric -- -- var ( -- groupingKey uint64 -- ) -- if v.grouping.without { -- groupingKey, _ = metric.HashWithoutLabels(make([]byte, 0, 1024), v.grouping.groups...) -- } else { -- groupingKey, _ = metric.HashForLabels(make([]byte, 0, 1024), v.grouping.groups...) -- } -- group, ok := result[groupingKey] -- // Add a new group if it doesn't exist. -- if !ok { -- var m labels.Labels -- -- if v.grouping.without { -- lb := labels.NewBuilder(metric) -- lb.Del(v.grouping.groups...) 
-- lb.Del(labels.MetricName) -- m = lb.Labels() -- } else { -- m = make(labels.Labels, 0, len(v.grouping.groups)) -- for _, l := range metric { -- for _, n := range v.grouping.groups { -- if l.Name == n { -- m = append(m, l) -- break -- } -- } -- } -- sort.Sort(m) -- } -- result[groupingKey] = &groupedAggregation{ -- labels: m, -- value: s.V, -- mean: s.V, -- groupCount: 1, -- } -- -- inputVecLen := len(vec) -- resultSize := v.params -- if v.params > inputVecLen { -- resultSize = inputVecLen -- } -- if v.operation == OpTypeStdvar || v.operation == OpTypeStddev { -- result[groupingKey].value = 0.0 -- } else if v.operation == OpTypeTopK { -- result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize) -- heap.Push(&result[groupingKey].heap, &promql.Sample{ -- Point: promql.Point{V: s.V}, -- Metric: s.Metric, -- }) -- } else if v.operation == OpTypeBottomK { -- result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize) -- heap.Push(&result[groupingKey].reverseHeap, &promql.Sample{ -- Point: promql.Point{V: s.V}, -- Metric: s.Metric, -- }) -- } -- continue -- } -- switch v.operation { -- case OpTypeSum: -- group.value += s.V -- -- case OpTypeAvg: -- group.groupCount++ -- group.mean += (s.V - group.mean) / float64(group.groupCount) -- -- case OpTypeMax: -- if group.value < s.V || math.IsNaN(group.value) { -- group.value = s.V -- } -- -- case OpTypeMin: -- if group.value > s.V || math.IsNaN(group.value) { -- group.value = s.V -- } -- -- case OpTypeCount: -- group.groupCount++ -- -- case OpTypeStddev, OpTypeStdvar: -- group.groupCount++ -- delta := s.V - group.mean -- group.mean += delta / float64(group.groupCount) -- group.value += delta * (s.V - group.mean) -- -- case OpTypeTopK: -- if len(group.heap) < v.params || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) { -- if len(group.heap) == v.params { -- heap.Pop(&group.heap) -- } -- heap.Push(&group.heap, &promql.Sample{ -- Point: promql.Point{V: s.V}, -- Metric: s.Metric, -- }) -- } -- -- case OpTypeBottomK: -- if len(group.reverseHeap) < v.params || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) { -- if len(group.reverseHeap) == v.params { -- heap.Pop(&group.reverseHeap) -- } -- heap.Push(&group.reverseHeap, &promql.Sample{ -- Point: promql.Point{V: s.V}, -- Metric: s.Metric, -- }) -- } -- default: -- panic(errors.Errorf(""expected aggregation operator but got %q"", v.operation)) -- } -- } -- vec = vec[:0] -- for _, aggr := range result { -- switch v.operation { -- case OpTypeAvg: -- aggr.value = aggr.mean -- -- case OpTypeCount: -- aggr.value = float64(aggr.groupCount) -- -- case OpTypeStddev: -- aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount)) -- -- case OpTypeStdvar: -- aggr.value = aggr.value / float64(aggr.groupCount) -- -- case OpTypeTopK: -- // The heap keeps the lowest value on top, so reverse it. -- sort.Sort(sort.Reverse(aggr.heap)) -- for _, v := range aggr.heap { -- vec = append(vec, promql.Sample{ -- Metric: v.Metric, -- Point: promql.Point{ -- T: ts, -- V: v.V, -- }, -- }) -- } -- continue // Bypass default append. -- -- case OpTypeBottomK: -- // The heap keeps the lowest value on top, so reverse it. -- sort.Sort(sort.Reverse(aggr.reverseHeap)) -- for _, v := range aggr.reverseHeap { -- vec = append(vec, promql.Sample{ -- Metric: v.Metric, -- Point: promql.Point{ -- T: ts, -- V: v.V, -- }, -- }) -- } -- continue // Bypass default append. 
-- default: -- } -- vec = append(vec, promql.Sample{ -- Metric: aggr.labels, -- Point: promql.Point{ -- T: ts, -- V: aggr.value, -- }, -- }) -- } -- return next, ts, vec -- }) --} -- --// Evaluator implements `SampleExpr` for a rangeAggregationExpr --func (e *rangeAggregationExpr) Evaluator() StepEvaluator { -- var fn RangeVectorAggregator -- switch e.operation { -- case OpTypeRate: -- fn = rate(e.left.interval) -- case OpTypeCountOverTime: -- fn = count -- } -- return StepEvaluatorFn(func() (bool, int64, promql.Vector) { -- next := e.iterator.Next() -- if !next { -- return false, 0, promql.Vector{} -- } -- ts, vec := e.iterator.At(fn) -- return true, ts, vec -- }) --} -- - // rate calculate the per-second rate of log lines. - func rate(selRange time.Duration) func(ts int64, samples []promql.Point) float64 { - return func(ts int64, samples []promql.Point) float64 { -diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go -index 547c93d3b3730..05bcee531060c 100644 ---- a/pkg/logql/engine_test.go -+++ b/pkg/logql/engine_test.go -@@ -18,7 +18,6 @@ var testSize = int64(300) - - func TestEngine_NewInstantQuery(t *testing.T) { - t.Parallel() -- eng := NewEngine(EngineOpts{}) - for _, test := range []struct { - qs string - ts time.Time -@@ -291,7 +290,8 @@ func TestEngine_NewInstantQuery(t *testing.T) { - t.Run(fmt.Sprintf(""%s %s"", test.qs, test.direction), func(t *testing.T) { - t.Parallel() - -- q := eng.NewInstantQuery(newQuerierRecorder(test.streams, test.params), test.qs, test.ts, test.direction, test.limit) -+ eng := NewEngine(EngineOpts{}, newQuerierRecorder(test.streams, test.params)) -+ q := eng.NewInstantQuery(test.qs, test.ts, test.direction, test.limit) - res, err := q.Exec(context.Background()) - if err != nil { - t.Fatal(err) -@@ -303,7 +303,6 @@ func TestEngine_NewInstantQuery(t *testing.T) { - - func TestEngine_NewRangeQuery(t *testing.T) { - t.Parallel() -- eng := NewEngine(EngineOpts{}) - for _, test := range []struct { - qs string - start time.Time -@@ -680,7 +679,9 @@ func TestEngine_NewRangeQuery(t *testing.T) { - t.Run(fmt.Sprintf(""%s %s"", test.qs, test.direction), func(t *testing.T) { - t.Parallel() - -- q := eng.NewRangeQuery(newQuerierRecorder(test.streams, test.params), test.qs, test.start, test.end, test.step, test.direction, test.limit) -+ eng := NewEngine(EngineOpts{}, newQuerierRecorder(test.streams, test.params)) -+ -+ q := eng.NewRangeQuery(test.qs, test.start, test.end, test.step, test.direction, test.limit) - res, err := q.Exec(context.Background()) - if err != nil { - t.Fatal(err) -@@ -709,10 +710,9 @@ var result promql.Value - - func benchmarkRangeQuery(testsize int64, b *testing.B) { - b.ReportAllocs() -- eng := NewEngine(EngineOpts{}) -+ eng := NewEngine(EngineOpts{}, getLocalQuerier(testsize)) - start := time.Unix(0, 0) - end := time.Unix(testsize, 0) -- querier := getLocalQuerier(testsize) - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, test := range []struct { -@@ -741,7 +741,7 @@ func benchmarkRangeQuery(testsize int64, b *testing.B) { - {`bottomk(2,rate(({app=~""foo|bar""} |~"".+bar"")[1m]))`, logproto.FORWARD}, - {`bottomk(3,rate(({app=~""foo|bar""} |~"".+bar"")[1m])) without (app)`, logproto.FORWARD}, - } { -- q := eng.NewRangeQuery(querier, test.qs, start, end, 60*time.Second, test.direction, 1000) -+ q := eng.NewRangeQuery(test.qs, start, end, 60*time.Second, test.direction, 1000) - res, err := q.Exec(context.Background()) - if err != nil { - b.Fatal(err) -diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go -new file mode 
100644 -index 0000000000000..589fdf802ebd4 ---- /dev/null -+++ b/pkg/logql/evaluator.go -@@ -0,0 +1,327 @@ -+package logql -+ -+import ( -+ ""container/heap"" -+ ""context"" -+ ""math"" -+ ""sort"" -+ ""time"" -+ -+ ""github.com/grafana/loki/pkg/iter"" -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/pkg/errors"" -+ ""github.com/prometheus/prometheus/pkg/labels"" -+ ""github.com/prometheus/prometheus/promql"" -+) -+ -+// Params details the parameters associated with a loki request -+type Params interface { -+ String() string -+ Start() time.Time -+ End() time.Time -+ Step() time.Duration -+ Limit() uint32 -+ Direction() logproto.Direction -+} -+ -+// LiteralParams impls Params -+type LiteralParams struct { -+ qs string -+ start, end time.Time -+ step time.Duration -+ direction logproto.Direction -+ limit uint32 -+} -+ -+// String impls Params -+func (p LiteralParams) String() string { return p.qs } -+ -+// Start impls Params -+func (p LiteralParams) Start() time.Time { return p.start } -+ -+// End impls Params -+func (p LiteralParams) End() time.Time { return p.end } -+ -+// Step impls Params -+func (p LiteralParams) Step() time.Duration { return p.step } -+ -+// Limit impls Params -+func (p LiteralParams) Limit() uint32 { return p.limit } -+ -+// Direction impls Params -+func (p LiteralParams) Direction() logproto.Direction { return p.direction } -+ -+// IsInstant returns whether a query is an instant query -+func IsInstant(q Params) bool { -+ return q.Start() == q.End() && q.Step() == 0 -+} -+ -+// Evaluator is an interface for iterating over data at different nodes in the AST -+type Evaluator interface { -+ // Evaluator returns a StepEvaluator for a given SampleExpr -+ Evaluator(context.Context, SampleExpr, Params) (StepEvaluator, error) -+ // Iterator returns the iter.EntryIterator for a given LogSelectorExpr -+ Iterator(context.Context, LogSelectorExpr, Params) (iter.EntryIterator, error) -+} -+ -+type defaultEvaluator struct { -+ maxLookBackPeriod time.Duration -+ querier Querier -+} -+ -+func (ev *defaultEvaluator) Iterator(ctx context.Context, expr LogSelectorExpr, q Params) (iter.EntryIterator, error) { -+ params := SelectParams{ -+ QueryRequest: &logproto.QueryRequest{ -+ Start: q.Start(), -+ End: q.End(), -+ Limit: q.Limit(), -+ Direction: q.Direction(), -+ Selector: expr.String(), -+ }, -+ } -+ -+ if IsInstant(q) { -+ params.Start = params.Start.Add(-ev.maxLookBackPeriod) -+ } -+ -+ return ev.querier.Select(ctx, params) -+ -+} -+ -+func (ev *defaultEvaluator) Evaluator(ctx context.Context, expr SampleExpr, q Params) (StepEvaluator, error) { -+ switch e := expr.(type) { -+ case *vectorAggregationExpr: -+ return ev.vectorAggEvaluator(ctx, e, q) -+ case *rangeAggregationExpr: -+ return ev.rangeAggEvaluator(ctx, e, q) -+ -+ default: -+ return nil, errors.Errorf(""unexpected type (%T): %v"", e, e) -+ } -+} -+ -+func (ev *defaultEvaluator) vectorAggEvaluator(ctx context.Context, expr *vectorAggregationExpr, q Params) (StepEvaluator, error) { -+ nextEvaluator, err := ev.Evaluator(ctx, expr.left, q) -+ if err != nil { -+ return nil, err -+ } -+ -+ return newStepEvaluator(func() (bool, int64, promql.Vector) { -+ next, ts, vec := nextEvaluator.Next() -+ if !next { -+ return false, 0, promql.Vector{} -+ } -+ result := map[uint64]*groupedAggregation{} -+ if expr.operation == OpTypeTopK || expr.operation == OpTypeBottomK { -+ if expr.params < 1 { -+ return next, ts, promql.Vector{} -+ } -+ -+ } -+ for _, s := range vec { -+ metric := s.Metric -+ -+ var ( -+ groupingKey uint64 -+ 
) -+ if expr.grouping.without { -+ groupingKey, _ = metric.HashWithoutLabels(make([]byte, 0, 1024), expr.grouping.groups...) -+ } else { -+ groupingKey, _ = metric.HashForLabels(make([]byte, 0, 1024), expr.grouping.groups...) -+ } -+ group, ok := result[groupingKey] -+ // Add a new group if it doesn't exist. -+ if !ok { -+ var m labels.Labels -+ -+ if expr.grouping.without { -+ lb := labels.NewBuilder(metric) -+ lb.Del(expr.grouping.groups...) -+ lb.Del(labels.MetricName) -+ m = lb.Labels() -+ } else { -+ m = make(labels.Labels, 0, len(expr.grouping.groups)) -+ for _, l := range metric { -+ for _, n := range expr.grouping.groups { -+ if l.Name == n { -+ m = append(m, l) -+ break -+ } -+ } -+ } -+ sort.Sort(m) -+ } -+ result[groupingKey] = &groupedAggregation{ -+ labels: m, -+ value: s.V, -+ mean: s.V, -+ groupCount: 1, -+ } -+ -+ inputVecLen := len(vec) -+ resultSize := expr.params -+ if expr.params > inputVecLen { -+ resultSize = inputVecLen -+ } -+ if expr.operation == OpTypeStdvar || expr.operation == OpTypeStddev { -+ result[groupingKey].value = 0.0 -+ } else if expr.operation == OpTypeTopK { -+ result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize) -+ heap.Push(&result[groupingKey].heap, &promql.Sample{ -+ Point: promql.Point{V: s.V}, -+ Metric: s.Metric, -+ }) -+ } else if expr.operation == OpTypeBottomK { -+ result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize) -+ heap.Push(&result[groupingKey].reverseHeap, &promql.Sample{ -+ Point: promql.Point{V: s.V}, -+ Metric: s.Metric, -+ }) -+ } -+ continue -+ } -+ switch expr.operation { -+ case OpTypeSum: -+ group.value += s.V -+ -+ case OpTypeAvg: -+ group.groupCount++ -+ group.mean += (s.V - group.mean) / float64(group.groupCount) -+ -+ case OpTypeMax: -+ if group.value < s.V || math.IsNaN(group.value) { -+ group.value = s.V -+ } -+ -+ case OpTypeMin: -+ if group.value > s.V || math.IsNaN(group.value) { -+ group.value = s.V -+ } -+ -+ case OpTypeCount: -+ group.groupCount++ -+ -+ case OpTypeStddev, OpTypeStdvar: -+ group.groupCount++ -+ delta := s.V - group.mean -+ group.mean += delta / float64(group.groupCount) -+ group.value += delta * (s.V - group.mean) -+ -+ case OpTypeTopK: -+ if len(group.heap) < expr.params || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) { -+ if len(group.heap) == expr.params { -+ heap.Pop(&group.heap) -+ } -+ heap.Push(&group.heap, &promql.Sample{ -+ Point: promql.Point{V: s.V}, -+ Metric: s.Metric, -+ }) -+ } -+ -+ case OpTypeBottomK: -+ if len(group.reverseHeap) < expr.params || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) { -+ if len(group.reverseHeap) == expr.params { -+ heap.Pop(&group.reverseHeap) -+ } -+ heap.Push(&group.reverseHeap, &promql.Sample{ -+ Point: promql.Point{V: s.V}, -+ Metric: s.Metric, -+ }) -+ } -+ default: -+ panic(errors.Errorf(""expected aggregation operator but got %q"", expr.operation)) -+ } -+ } -+ vec = vec[:0] -+ for _, aggr := range result { -+ switch expr.operation { -+ case OpTypeAvg: -+ aggr.value = aggr.mean -+ -+ case OpTypeCount: -+ aggr.value = float64(aggr.groupCount) -+ -+ case OpTypeStddev: -+ aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount)) -+ -+ case OpTypeStdvar: -+ aggr.value = aggr.value / float64(aggr.groupCount) -+ -+ case OpTypeTopK: -+ // The heap keeps the lowest value on top, so reverse it. 
-+ sort.Sort(sort.Reverse(aggr.heap)) -+ for _, v := range aggr.heap { -+ vec = append(vec, promql.Sample{ -+ Metric: v.Metric, -+ Point: promql.Point{ -+ T: ts, -+ V: v.V, -+ }, -+ }) -+ } -+ continue // Bypass default append. -+ -+ case OpTypeBottomK: -+ // The heap keeps the lowest value on top, so reverse it. -+ sort.Sort(sort.Reverse(aggr.reverseHeap)) -+ for _, v := range aggr.reverseHeap { -+ vec = append(vec, promql.Sample{ -+ Metric: v.Metric, -+ Point: promql.Point{ -+ T: ts, -+ V: v.V, -+ }, -+ }) -+ } -+ continue // Bypass default append. -+ default: -+ } -+ vec = append(vec, promql.Sample{ -+ Metric: aggr.labels, -+ Point: promql.Point{ -+ T: ts, -+ V: aggr.value, -+ }, -+ }) -+ } -+ return next, ts, vec -+ -+ }, nextEvaluator.Close) -+} -+ -+func (ev *defaultEvaluator) rangeAggEvaluator(ctx context.Context, expr *rangeAggregationExpr, q Params) (StepEvaluator, error) { -+ entryIter, err := ev.querier.Select(ctx, SelectParams{ -+ &logproto.QueryRequest{ -+ Start: q.Start().Add(-expr.left.interval), -+ End: q.End(), -+ Limit: 0, -+ Direction: logproto.FORWARD, -+ Selector: expr.Selector().String(), -+ }, -+ }) -+ -+ if err != nil { -+ return nil, err -+ } -+ -+ vecIter := newRangeVectorIterator(entryIter, expr.left.interval.Nanoseconds(), q.Step().Nanoseconds(), -+ q.Start().UnixNano(), q.End().UnixNano()) -+ -+ var fn RangeVectorAggregator -+ switch expr.operation { -+ case OpTypeRate: -+ fn = rate(expr.left.interval) -+ case OpTypeCountOverTime: -+ fn = count -+ } -+ -+ return newStepEvaluator(func() (bool, int64, promql.Vector) { -+ next := vecIter.Next() -+ if !next { -+ return false, 0, promql.Vector{} -+ } -+ ts, vec := vecIter.At(fn) -+ return true, ts, vec -+ -+ }, vecIter.Close) -+} -diff --git a/pkg/querier/http.go b/pkg/querier/http.go -index 33347a3175f80..158f6761ffec1 100644 ---- a/pkg/querier/http.go -+++ b/pkg/querier/http.go -@@ -41,7 +41,7 @@ func (q *Querier) RangeQueryHandler(w http.ResponseWriter, r *http.Request) { - http.Error(w, httpgrpc.Errorf(http.StatusBadRequest, err.Error()).Error(), http.StatusBadRequest) - return - } -- query := q.engine.NewRangeQuery(q, request.Query, request.Start, request.End, request.Step, request.Direction, request.Limit) -+ query := q.engine.NewRangeQuery(request.Query, request.Start, request.End, request.Step, request.Direction, request.Limit) - result, err := query.Exec(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) -@@ -65,7 +65,7 @@ func (q *Querier) InstantQueryHandler(w http.ResponseWriter, r *http.Request) { - http.Error(w, httpgrpc.Errorf(http.StatusBadRequest, err.Error()).Error(), http.StatusBadRequest) - return - } -- query := q.engine.NewInstantQuery(q, request.Query, request.Ts, request.Direction, request.Limit) -+ query := q.engine.NewInstantQuery(request.Query, request.Ts, request.Direction, request.Limit) - result, err := query.Exec(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) -@@ -111,7 +111,7 @@ func (q *Querier) LogQueryHandler(w http.ResponseWriter, r *http.Request) { - return - } - -- query := q.engine.NewRangeQuery(q, request.Query, request.Start, request.End, request.Step, request.Direction, request.Limit) -+ query := q.engine.NewRangeQuery(request.Query, request.Start, request.End, request.Step, request.Direction, request.Limit) - result, err := query.Exec(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) -diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go -index 8358b8be41b0d..39e647fa84f38 100644 ---- 
a/pkg/querier/querier.go -+++ b/pkg/querier/querier.go -@@ -60,7 +60,7 @@ type Querier struct { - ring ring.ReadRing - pool *cortex_client.Pool - store storage.Store -- engine *logql.Engine -+ engine logql.Engine - limits *validation.Overrides - } - -@@ -76,14 +76,16 @@ func New(cfg Config, clientCfg client.Config, ring ring.ReadRing, store storage. - // newQuerier creates a new Querier and allows to pass a custom ingester client factory - // used for testing purposes - func newQuerier(cfg Config, clientCfg client.Config, clientFactory cortex_client.Factory, ring ring.ReadRing, store storage.Store, limits *validation.Overrides) (*Querier, error) { -- return &Querier{ -+ querier := Querier{ - cfg: cfg, - ring: ring, - pool: cortex_client.NewPool(clientCfg.PoolConfig, ring, clientFactory, util.Logger), - store: store, -- engine: logql.NewEngine(cfg.Engine), - limits: limits, -- }, nil -+ } -+ querier.engine = logql.NewEngine(cfg.Engine, &querier) -+ -+ return &querier, nil - } - - type responseFromIngesters struct {",unknown,"Decouple logql engine/AST from execution context (#1605) - -* logql engine is an interface - -* [wip] begins agnostic logql evaluator work - -* decouples logql AST from execution context - -* healthcheck comments" -7435695991b23c93603af16ece7aa34a43c94f40,2018-12-12 23:47:28,Trent White,"Add files via upload (#83) - -* Add files via upload - -Signed-off-by: Tom Wilkie - -* Centering not working for some reason. - -Signed-off-by: Tom Wilkie ",False,"diff --git a/README.md b/README.md -index 8c9b2b0822338..c9bee55b4b8af 100644 ---- a/README.md -+++ b/README.md -@@ -1,11 +1,9 @@ --

-- --

-+

- - - - --

-+ - - # Loki: like Prometheus, but for logs. - -diff --git a/docs/logo.png b/docs/logo.png -index 1cd7ddeafa9f6..8e89cb402b223 100644 -Binary files a/docs/logo.png and b/docs/logo.png differ -diff --git a/docs/logo_and_name.png b/docs/logo_and_name.png -index f1892b385d1a0..db5ec01f74e02 100644 -Binary files a/docs/logo_and_name.png and b/docs/logo_and_name.png differ",unknown,"Add files via upload (#83) - -* Add files via upload - -Signed-off-by: Tom Wilkie - -* Centering not working for some reason. - -Signed-off-by: Tom Wilkie " -ab1caea12325b5db777101347acf4f277312adf6,2024-08-29 22:06:56,Trevor Whitney,feat: add _extracted suffix to detected fields conflicts (#13993),False,"diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go -index b320e5c5fd6ad..a03182ae2b942 100644 ---- a/pkg/querier/querier.go -+++ b/pkg/querier/querier.go -@@ -1199,6 +1199,11 @@ func parseDetectedFields(limit uint32, streams logqlmodel.Streams) map[string]*p - emtpyparser := """" - - for _, stream := range streams { -+ streamLbls, err := syntax.ParseLabels(stream.Labels) -+ if err != nil { -+ streamLbls = labels.EmptyLabels() -+ } -+ - for _, entry := range stream.Entries { - structuredMetadata := getStructuredMetadata(entry) - for k, vals := range structuredMetadata { -@@ -1226,7 +1231,7 @@ func parseDetectedFields(limit uint32, streams logqlmodel.Streams) map[string]*p - } - } - -- detected, parser := parseLine(entry.Line) -+ detected, parser := parseLine(entry.Line, streamLbls) - for k, vals := range detected { - df, ok := detectedFields[k] - if !ok && fieldCount < limit { -@@ -1283,11 +1288,11 @@ func getStructuredMetadata(entry push.Entry) map[string][]string { - return result - } - --func parseLine(line string) (map[string][]string, *string) { -+func parseLine(line string, streamLbls labels.Labels) (map[string][]string, *string) { - parser := ""logfmt"" - logFmtParser := logql_log.NewLogfmtParser(true, false) - -- lbls := logql_log.NewBaseLabelsBuilder().ForLabels(labels.EmptyLabels(), 0) -+ lbls := logql_log.NewBaseLabelsBuilder().ForLabels(streamLbls, 0) - _, logfmtSuccess := logFmtParser.Process(0, []byte(line), lbls) - if !logfmtSuccess || lbls.HasErr() { - parser = ""json"" -@@ -1301,6 +1306,10 @@ func parseLine(line string) (map[string][]string, *string) { - - parsedLabels := map[string]map[string]struct{}{} - for _, lbl := range lbls.LabelsResult().Labels() { -+ // skip indexed labels, as we only want detected fields -+ if streamLbls.Has(lbl.Name) { -+ continue -+ } - if values, ok := parsedLabels[lbl.Name]; ok { - values[lbl.Value] = struct{}{} - } else { -diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go -index 4ddbab7ed2e59..c783b1bf11e34 100644 ---- a/pkg/querier/querier_mock_test.go -+++ b/pkg/querier/querier_mock_test.go -@@ -575,7 +575,7 @@ func mockStreamWithLabels(from int, quantity int, labels string) logproto.Stream - } - - func mockLogfmtStream(from int, quantity int) logproto.Stream { -- return mockLogfmtStreamWithLabels(from, quantity, `{type=""test""}`) -+ return mockLogfmtStreamWithLabels(from, quantity, `{type=""test"", name=""foo""}`) - } - - func mockLogfmtStreamWithLabels(_ int, quantity int, labels string) logproto.Stream { -@@ -586,7 +586,7 @@ func mockLogfmtStreamWithLabels(_ int, quantity int, labels string) logproto.Str - entries = append(entries, logproto.Entry{ - Timestamp: time.Unix(int64(i), 0), - Line: fmt.Sprintf( -- `message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t`, -+ `message=""line %d"" 
count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t name=bar`, - i, - i, - (i * 10), -diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go -index 7336c3b11bfaf..701f01bfefd38 100644 ---- a/pkg/querier/querier_test.go -+++ b/pkg/querier/querier_test.go -@@ -1777,15 +1777,16 @@ func TestQuerier_DetectedFields(t *testing.T) { - detectedFields := resp.Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t -- assert.Len(t, detectedFields, 7) -+ assert.Len(t, detectedFields, 8) - expectedCardinality := map[string]uint64{ -- ""message"": 5, -- ""count"": 5, -- ""fake"": 1, -- ""bytes"": 5, -- ""duration"": 5, -- ""percent"": 5, -- ""even"": 2, -+ ""message"": 5, -+ ""count"": 5, -+ ""fake"": 1, -+ ""bytes"": 5, -+ ""duration"": 5, -+ ""percent"": 5, -+ ""even"": 2, -+ ""name_extracted"": 1, - } - for _, d := range detectedFields { - card := expectedCardinality[d.Label] -@@ -1821,17 +1822,18 @@ func TestQuerier_DetectedFields(t *testing.T) { - detectedFields := resp.Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t -- assert.Len(t, detectedFields, 9) -+ assert.Len(t, detectedFields, 10) - expectedCardinality := map[string]uint64{ -- ""variable"": 5, -- ""constant"": 1, -- ""message"": 5, -- ""count"": 5, -- ""fake"": 1, -- ""bytes"": 5, -- ""duration"": 5, -- ""percent"": 5, -- ""even"": 2, -+ ""variable"": 5, -+ ""constant"": 1, -+ ""message"": 5, -+ ""count"": 5, -+ ""fake"": 1, -+ ""bytes"": 5, -+ ""duration"": 5, -+ ""percent"": 5, -+ ""even"": 2, -+ ""name_extracted"": 1, - } - for _, d := range detectedFields { - card := expectedCardinality[d.Label] -@@ -1867,7 +1869,7 @@ func TestQuerier_DetectedFields(t *testing.T) { - detectedFields := resp.Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t -- assert.Len(t, detectedFields, 7) -+ assert.Len(t, detectedFields, 8) - - var messageField, countField, bytesField, durationField, floatField, evenField *logproto.DetectedField - for _, field := range detectedFields { -@@ -1923,7 +1925,7 @@ func TestQuerier_DetectedFields(t *testing.T) { - detectedFields := resp.Fields - // log lines come from querier_mock_test.go - // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t -- assert.Len(t, detectedFields, 9) -+ assert.Len(t, detectedFields, 10) - - var messageField, countField, bytesField, durationField, floatField, evenField, constantField, variableField *logproto.DetectedField - for _, field := range detectedFields { -@@ -1955,7 +1957,56 @@ func TestQuerier_DetectedFields(t *testing.T) { - assert.Equal(t, []string{""logfmt""}, evenField.Parsers) - assert.Equal(t, []string{""""}, constantField.Parsers) - assert.Equal(t, []string{""""}, variableField.Parsers) -- }) -+ }, -+ ) -+ -+ t.Run( -+ ""adds _extracted suffix to detected fields that conflict with indexed labels"", -+ func(t *testing.T) { -+ store := newStoreMock() -+ store.On(""SelectLogs"", mock.Anything, mock.Anything). -+ Return(mockLogfmtStreamIterator(1, 2), nil) -+ -+ queryClient := newQueryClientMock() -+ queryClient.On(""Recv""). -+ Return(mockQueryResponse([]logproto.Stream{mockLogfmtStreamWithStructuredMetadata(1, 2)}), nil) -+ -+ ingesterClient := newQuerierClientMock() -+ ingesterClient.On(""Query"", mock.Anything, mock.Anything, mock.Anything). 
-+ Return(queryClient, nil) -+ -+ querier, err := newQuerier( -+ conf, -+ mockIngesterClientConfig(), -+ newIngesterClientMockFactory(ingesterClient), -+ mockReadRingWithOneActiveIngester(), -+ &mockDeleteGettter{}, -+ store, limits) -+ require.NoError(t, err) -+ -+ resp, err := querier.DetectedFields(ctx, &request) -+ require.NoError(t, err) -+ -+ detectedFields := resp.Fields -+ // log lines come from querier_mock_test.go -+ // message=""line %d"" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t -+ assert.Len(t, detectedFields, 10) -+ -+ var nameField *logproto.DetectedField -+ for _, field := range detectedFields { -+ switch field.Label { -+ case ""name_extracted"": -+ nameField = field -+ } -+ } -+ -+ assert.NotNil(t, nameField) -+ assert.Equal(t, ""name_extracted"", nameField.Label) -+ assert.Equal(t, logproto.DetectedFieldString, nameField.Type) -+ assert.Equal(t, []string{""logfmt""}, nameField.Parsers) -+ assert.Equal(t, uint64(1), nameField.Cardinality) -+ }, -+ ) - } - - func BenchmarkQuerierDetectedFields(b *testing.B) {",feat,add _extracted suffix to detected fields conflicts (#13993) -c45a5f9e8f6b48dc1596328d9dce03077941866f,2025-03-01 02:55:14,renovate[bot],"chore(deps): update terraform aws to ~> 5.89.0 (main) (#16507) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/production/terraform/modules/s3/versions.tf b/production/terraform/modules/s3/versions.tf -index df633f61344d7..f4ebc6fa47903 100644 ---- a/production/terraform/modules/s3/versions.tf -+++ b/production/terraform/modules/s3/versions.tf -@@ -2,7 +2,7 @@ terraform { - required_providers { - aws = { - source = ""hashicorp/aws"" -- version = ""~> 5.88.0"" -+ version = ""~> 5.89.0"" - } - - random = {",chore,"update terraform aws to ~> 5.89.0 (main) (#16507) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -e950251744955e93027dbd06d0b99c541e1ce32a,2024-11-15 21:49:36,Tugdual Saunier,"feat: build the Docker Driver for arm64 (#9247) - -Co-authored-by: Trevor Whitney ",False,"diff --git a/.github/jsonnetfile.json b/.github/jsonnetfile.json -index 58c6d34c67c48..731d3c3c82974 100644 ---- a/.github/jsonnetfile.json -+++ b/.github/jsonnetfile.json -@@ -8,7 +8,7 @@ - ""subdir"": ""workflows"" - } - }, -- ""version"": ""20aac53fcb06d378b1c1101c7e4dc989466eb4ff"" -+ ""version"": ""21f1189544e3976070cbdb6463f64c7a32dcc176"" - } - ], - ""legacyImports"": true -diff --git a/.github/jsonnetfile.lock.json b/.github/jsonnetfile.lock.json -index 5d356b0b4fb0b..d11f133725822 100644 ---- a/.github/jsonnetfile.lock.json -+++ b/.github/jsonnetfile.lock.json -@@ -8,8 +8,8 @@ - ""subdir"": ""workflows"" - } - }, -- ""version"": ""20aac53fcb06d378b1c1101c7e4dc989466eb4ff"", -- ""sum"": ""bo355Fm9Gm1TU13MjlXGXgrCXo4CPr7aEeTvgNFYAl8="" -+ ""version"": ""21f1189544e3976070cbdb6463f64c7a32dcc176"", -+ ""sum"": ""IPS1oGR8k7jk6J2snciTycWFgtISCwXSPhJ3A+nEGvY="" - } - ], - ""legacyImports"": false -diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet -index 6c16af50ad74c..bc00da6e2ad40 100644 ---- a/.github/release-workflows.jsonnet -+++ b/.github/release-workflows.jsonnet -@@ -15,6 +15,7 @@ local imageJobs = { - 'loki-canary-boringcrypto': build.image('loki-canary-boringcrypto', 'cmd/loki-canary-boringcrypto'), - promtail: build.image('promtail', 'clients/cmd/promtail'), - querytee: build.image('loki-query-tee', 'cmd/querytee', platform=['linux/amd64']), -+ 'loki-docker-driver': 
build.dockerPlugin('grafana/loki-docker-driver', 'clients/cmd/docker-driver', platform=['linux/amd64', 'linux/arm64']), - }; - - local weeklyImageJobs = { -@@ -27,6 +28,7 @@ local weeklyImageJobs = { - 'loki-canary-boringcrypto': build.weeklyImage('loki-canary-boringcrypto', 'cmd/loki-canary-boringcrypto'), - promtail: build.weeklyImage('promtail', 'clients/cmd/promtail'), - querytee: build.weeklyImage('loki-query-tee', 'cmd/querytee', platform=['linux/amd64']), -+ 'loki-docker-driver': build.weeklyDockerPlugin('grafana/loki-docker-driver', 'clients/cmd/docker-driver', platform=['linux/amd64', 'linux/arm64']), - }; - - local buildImageVersion = std.extVar('BUILD_IMAGE_VERSION'); -diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet -index 7343c7d72963d..72a47e901e890 100644 ---- a/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet -+++ b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet -@@ -105,6 +105,152 @@ local releaseLibStep = common.releaseLibStep; - }), - ]), - -+ dockerPlugin: function( -+ name, -+ path, -+ dockerfile='Dockerfile', -+ context='release', -+ platform=[ -+ 'linux/amd64', -+ 'linux/arm64', -+ 'linux/arm', -+ ] -+ ) -+ job.new() -+ + job.withStrategy({ -+ 'fail-fast': true, -+ matrix: { -+ platform: platform, -+ }, -+ }) -+ + job.withSteps([ -+ common.fetchReleaseLib, -+ common.fetchReleaseRepo, -+ common.setupNode, -+ common.googleAuth, -+ -+ step.new('Set up QEMU', 'docker/setup-qemu-action@v3'), -+ step.new('set up docker buildx', 'docker/setup-buildx-action@v3'), -+ -+ releaseStep('parse image platform') -+ + step.withId('platform') -+ + step.withRun(||| -+ mkdir -p images -+ -+ platform=""$(echo ""${{ matrix.platform}}"" | sed ""s/\(.*\)\/\(.*\)/\1-\2/"")"" -+ echo ""platform=${platform}"" >> $GITHUB_OUTPUT -+ echo ""platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)"" >> $GITHUB_OUTPUT -+ if [[ ""${platform}"" == ""linux/arm64"" ]]; then -+ echo ""plugin_arch=-arm64"" >> $GITHUB_OUTPUT -+ else -+ echo ""plugin_arch="" >> $GITHUB_OUTPUT -+ fi -+ |||), -+ -+ step.new('Build and export', 'docker/build-push-action@v6') -+ + step.withTimeoutMinutes('${{ fromJSON(env.BUILD_TIMEOUT) }}') -+ + step.withIf('${{ fromJSON(needs.version.outputs.pr_created) }}') -+ + step.withEnv({ -+ IMAGE_TAG: '${{ needs.version.outputs.version }}', -+ }) -+ + step.with({ -+ context: context, -+ file: 'release/%s/%s' % [path, dockerfile], -+ platforms: '${{ matrix.platform }}', -+ push: false, -+ tags: '${{ env.IMAGE_PREFIX }}/%s:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}' % [name], -+ outputs: 'type=docker,dest=release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar' % name, -+ 'build-args': 'IMAGE_TAG=${{ needs.version.outputs.version }},GOARCH=${{ steps.platform.outputs.platform_short }}', -+ }), -+ -+ releaseStep('Package as Docker plugin') -+ + step.withIf('${{ fromJSON(needs.version.outputs.pr_created) }}') -+ + step.withEnv({ -+ IMAGE_TAG: '${{ needs.version.outputs.version }}', -+ BUILD_DIR: 'release/%s' % [path], -+ }) -+ + step.withRun(||| -+ rm -rf ""${{ env.BUILD_DIR }}/rootfs"" || true -+ mkdir ""${{ env.BUILD_DIR }}/rootfs"" -+ tar -x -C ""${{ env.BUILD_DIR }}/rootfs"" -f ""release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ docker plugin create ""${{ env.IMAGE_TAG }}${{ 
steps.platform.outputs.plugin_arch }}"" ""${{ env.BUILD_DIR }}"" -+ |||), -+ -+ step.new('upload artifacts', 'google-github-actions/upload-cloud-storage@v2') -+ + step.withIf('${{ fromJSON(needs.version.outputs.pr_created) }}') -+ + step.with({ -+ path: 'release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar' % name, -+ destination: '${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images', //TODO: make bucket configurable -+ process_gcloudignore: false, -+ }), -+ ]), -+ -+ weeklyDockerPlugin: function( -+ name, -+ path, -+ dockerfile='Dockerfile', -+ context='release', -+ platform=[ -+ 'linux/amd64', -+ 'linux/arm64', -+ 'linux/arm', -+ ] -+ ) -+ job.new() -+ + job.withStrategy({ -+ matrix: { -+ platform: platform, -+ }, -+ }) -+ + job.withSteps([ -+ common.fetchReleaseLib, -+ common.fetchReleaseRepo, -+ common.setupNode, -+ -+ step.new('Set up QEMU', 'docker/setup-qemu-action@v3'), -+ step.new('set up docker buildx', 'docker/setup-buildx-action@v3'), -+ step.new('Login to DockerHub (from vault)', 'grafana/shared-workflows/actions/dockerhub-login@main'), -+ -+ releaseStep('Get weekly version') -+ + step.withId('weekly-version') -+ + step.withRun(||| -+ echo ""version=$(./tools/image-tag)"" >> $GITHUB_OUTPUT -+ -+ platform=""$(echo ""${{ matrix.platform}}"" | sed ""s/\(.*\)\/\(.*\)/\1-\2/"")"" -+ echo ""platform=${platform}"" >> $GITHUB_OUTPUT -+ echo ""platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)"" >> $GITHUB_OUTPUT -+ if [[ ""${platform}"" == ""linux/arm64"" ]]; then -+ echo ""plugin_arch=-arm64"" >> $GITHUB_OUTPUT -+ else -+ echo ""plugin_arch="" >> $GITHUB_OUTPUT -+ fi -+ |||), -+ -+ step.new('Build and export', 'docker/build-push-action@v6') -+ + step.withTimeoutMinutes('${{ fromJSON(env.BUILD_TIMEOUT) }}') -+ + step.with({ -+ context: context, -+ file: 'release/%s/%s' % [path, dockerfile], -+ platforms: '${{ matrix.platform }}', -+ push: false, -+ tags: '${{ env.IMAGE_PREFIX }}/%s:${{ steps.weekly-version.outputs.version }}' % [name], -+ outputs: 'type=docker,dest=release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar' % name, -+ 'build-args': 'IMAGE_TAG=${{ steps.weekly-version.outputs.version }},GOARCH=${{ steps.weekly-version.outputs.platform_short }}', -+ }), -+ -+ releaseStep('Package and push as Docker plugin') -+ + step.withEnv({ -+ IMAGE_TAG: '${{ steps.weekly-version.outputs.version }}', -+ BUILD_DIR: 'release/%s' % [path], -+ }) -+ + step.withRun(||| -+ rm -rf ""${{ env.BUILD_DIR }}/rootfs"" || true -+ mkdir ""${{ env.BUILD_DIR }}/rootfs"" -+ tar -x -C ""${{ env.BUILD_DIR }}/rootfs"" -f ""release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ docker plugin create ""${{ env.IMAGE_TAG }}${{ steps.platform.outputs.plugin_arch }}"" ""${{ env.BUILD_DIR }}"" -+ docker plugin push ""${{ env.IMAGE_TAG }}${{ steps.platform.outputs.plugin_arch }}"" -+ |||), -+ ]), - - version: - job.new() -diff --git a/.github/workflows/images.yml b/.github/workflows/images.yml -index 40801f19317d3..64dac0158637a 100644 ---- a/.github/workflows/images.yml -+++ b/.github/workflows/images.yml -@@ -329,6 +329,78 @@ - ""platforms"": ""linux/amd64,linux/arm64,linux/arm"" - ""push"": true - ""tags"": ""${{ env.IMAGE_PREFIX }}/loki-canary-boringcrypto:${{ steps.weekly-version.outputs.version }}"" -+ ""loki-docker-driver"": -+ ""env"": -+ ""BUILD_TIMEOUT"": 60 -+ ""IMAGE_PREFIX"": ""grafana"" -+ ""RELEASE_LIB_REF"": ""main"" -+ ""RELEASE_REPO"": 
""grafana/loki"" -+ ""needs"": -+ - ""check"" -+ ""runs-on"": ""ubuntu-latest"" -+ ""steps"": -+ - ""name"": ""pull release library code"" -+ ""uses"": ""actions/checkout@v4"" -+ ""with"": -+ ""path"": ""lib"" -+ ""ref"": ""${{ env.RELEASE_LIB_REF }}"" -+ ""repository"": ""grafana/loki-release"" -+ - ""name"": ""pull code to release"" -+ ""uses"": ""actions/checkout@v4"" -+ ""with"": -+ ""path"": ""release"" -+ ""repository"": ""${{ env.RELEASE_REPO }}"" -+ - ""name"": ""setup node"" -+ ""uses"": ""actions/setup-node@v4"" -+ ""with"": -+ ""node-version"": 20 -+ - ""name"": ""Set up QEMU"" -+ ""uses"": ""docker/setup-qemu-action@v3"" -+ - ""name"": ""set up docker buildx"" -+ ""uses"": ""docker/setup-buildx-action@v3"" -+ - ""name"": ""Login to DockerHub (from vault)"" -+ ""uses"": ""grafana/shared-workflows/actions/dockerhub-login@main"" -+ - ""id"": ""weekly-version"" -+ ""name"": ""Get weekly version"" -+ ""run"": | -+ echo ""version=$(./tools/image-tag)"" >> $GITHUB_OUTPUT -+ -+ platform=""$(echo ""${{ matrix.platform}}"" | sed ""s/\(.*\)\/\(.*\)/\1-\2/"")"" -+ echo ""platform=${platform}"" >> $GITHUB_OUTPUT -+ echo ""platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)"" >> $GITHUB_OUTPUT -+ if [[ ""${platform}"" == ""linux/arm64"" ]]; then -+ echo ""plugin_arch=-arm64"" >> $GITHUB_OUTPUT -+ else -+ echo ""plugin_arch="" >> $GITHUB_OUTPUT -+ fi -+ ""working-directory"": ""release"" -+ - ""name"": ""Build and export"" -+ ""timeout-minutes"": ""${{ fromJSON(env.BUILD_TIMEOUT) }}"" -+ ""uses"": ""docker/build-push-action@v6"" -+ ""with"": -+ ""build-args"": ""IMAGE_TAG=${{ steps.weekly-version.outputs.version }},GOARCH=${{ steps.weekly-version.outputs.platform_short }}"" -+ ""context"": ""release"" -+ ""file"": ""release/clients/cmd/docker-driver/Dockerfile"" -+ ""outputs"": ""type=docker,dest=release/images/grafana/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ ""platforms"": ""${{ matrix.platform }}"" -+ ""push"": false -+ ""tags"": ""${{ env.IMAGE_PREFIX }}/grafana/loki-docker-driver:${{ steps.weekly-version.outputs.version }}"" -+ - ""env"": -+ ""BUILD_DIR"": ""release/clients/cmd/docker-driver"" -+ ""IMAGE_TAG"": ""${{ steps.weekly-version.outputs.version }}"" -+ ""name"": ""Package and push as Docker plugin"" -+ ""run"": | -+ rm -rf ""${{ env.BUILD_DIR }}/rootfs"" || true -+ mkdir ""${{ env.BUILD_DIR }}/rootfs"" -+ tar -x -C ""${{ env.BUILD_DIR }}/rootfs"" -f ""release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ docker plugin create ""${{ env.IMAGE_TAG }}${{ steps.platform.outputs.plugin_arch }}"" ""${{ env.BUILD_DIR }}"" -+ docker plugin push ""${{ env.IMAGE_TAG }}${{ steps.platform.outputs.plugin_arch }}"" -+ ""working-directory"": ""release"" -+ ""strategy"": -+ ""matrix"": -+ ""platform"": -+ - ""linux/amd64"" -+ - ""linux/arm64"" - ""promtail"": - ""env"": - ""BUILD_TIMEOUT"": 60 -diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml -index 807e52afab253..0db833dec9a2f 100644 ---- a/.github/workflows/minor-release-pr.yml -+++ b/.github/workflows/minor-release-pr.yml -@@ -31,6 +31,7 @@ jobs: - - ""loki"" - - ""loki-canary"" - - ""loki-canary-boringcrypto"" -+ - ""loki-docker-driver"" - - ""promtail"" - - ""querytee"" - runs-on: ""ubuntu-latest"" -@@ -598,6 +599,86 @@ jobs: - - ""linux/amd64"" - - ""linux/arm64"" - - ""linux/arm"" -+ loki-docker-driver: -+ needs: -+ - ""version"" -+ runs-on: ""ubuntu-latest"" -+ steps: -+ - 
name: ""pull release library code"" -+ uses: ""actions/checkout@v4"" -+ with: -+ path: ""lib"" -+ ref: ""${{ env.RELEASE_LIB_REF }}"" -+ repository: ""grafana/loki-release"" -+ - name: ""pull code to release"" -+ uses: ""actions/checkout@v4"" -+ with: -+ path: ""release"" -+ repository: ""${{ env.RELEASE_REPO }}"" -+ - name: ""setup node"" -+ uses: ""actions/setup-node@v4"" -+ with: -+ node-version: 20 -+ - name: ""auth gcs"" -+ uses: ""google-github-actions/auth@v2"" -+ with: -+ credentials_json: ""${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"" -+ - name: ""Set up QEMU"" -+ uses: ""docker/setup-qemu-action@v3"" -+ - name: ""set up docker buildx"" -+ uses: ""docker/setup-buildx-action@v3"" -+ - id: ""platform"" -+ name: ""parse image platform"" -+ run: | -+ mkdir -p images -+ -+ platform=""$(echo ""${{ matrix.platform}}"" | sed ""s/\(.*\)\/\(.*\)/\1-\2/"")"" -+ echo ""platform=${platform}"" >> $GITHUB_OUTPUT -+ echo ""platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)"" >> $GITHUB_OUTPUT -+ if [[ ""${platform}"" == ""linux/arm64"" ]]; then -+ echo ""plugin_arch=-arm64"" >> $GITHUB_OUTPUT -+ else -+ echo ""plugin_arch="" >> $GITHUB_OUTPUT -+ fi -+ working-directory: ""release"" -+ - env: -+ IMAGE_TAG: ""${{ needs.version.outputs.version }}"" -+ if: ""${{ fromJSON(needs.version.outputs.pr_created) }}"" -+ name: ""Build and export"" -+ timeout-minutes: ""${{ fromJSON(env.BUILD_TIMEOUT) }}"" -+ uses: ""docker/build-push-action@v6"" -+ with: -+ build-args: ""IMAGE_TAG=${{ needs.version.outputs.version }},GOARCH=${{ steps.platform.outputs.platform_short }}"" -+ context: ""release"" -+ file: ""release/clients/cmd/docker-driver/Dockerfile"" -+ outputs: ""type=docker,dest=release/images/grafana/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ platforms: ""${{ matrix.platform }}"" -+ push: false -+ tags: ""${{ env.IMAGE_PREFIX }}/grafana/loki-docker-driver:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"" -+ - env: -+ BUILD_DIR: ""release/clients/cmd/docker-driver"" -+ IMAGE_TAG: ""${{ needs.version.outputs.version }}"" -+ if: ""${{ fromJSON(needs.version.outputs.pr_created) }}"" -+ name: ""Package as Docker plugin"" -+ run: | -+ rm -rf ""${{ env.BUILD_DIR }}/rootfs"" || true -+ mkdir ""${{ env.BUILD_DIR }}/rootfs"" -+ tar -x -C ""${{ env.BUILD_DIR }}/rootfs"" -f ""release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ docker plugin create ""${{ env.IMAGE_TAG }}${{ steps.platform.outputs.plugin_arch }}"" ""${{ env.BUILD_DIR }}"" -+ working-directory: ""release"" -+ - if: ""${{ fromJSON(needs.version.outputs.pr_created) }}"" -+ name: ""upload artifacts"" -+ uses: ""google-github-actions/upload-cloud-storage@v2"" -+ with: -+ destination: ""${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"" -+ path: ""release/images/grafana/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ process_gcloudignore: false -+ strategy: -+ fail-fast: true -+ matrix: -+ platform: -+ - ""linux/amd64"" -+ - ""linux/arm64"" - promtail: - needs: - - ""version"" -diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml -index 840f1d1d3f49e..f1b904d22f407 100644 ---- a/.github/workflows/patch-release-pr.yml -+++ b/.github/workflows/patch-release-pr.yml -@@ -31,6 +31,7 @@ jobs: - - ""loki"" - - ""loki-canary"" - - ""loki-canary-boringcrypto"" -+ - ""loki-docker-driver"" - - ""promtail"" - - 
""querytee"" - runs-on: ""ubuntu-latest"" -@@ -598,6 +599,86 @@ jobs: - - ""linux/amd64"" - - ""linux/arm64"" - - ""linux/arm"" -+ loki-docker-driver: -+ needs: -+ - ""version"" -+ runs-on: ""ubuntu-latest"" -+ steps: -+ - name: ""pull release library code"" -+ uses: ""actions/checkout@v4"" -+ with: -+ path: ""lib"" -+ ref: ""${{ env.RELEASE_LIB_REF }}"" -+ repository: ""grafana/loki-release"" -+ - name: ""pull code to release"" -+ uses: ""actions/checkout@v4"" -+ with: -+ path: ""release"" -+ repository: ""${{ env.RELEASE_REPO }}"" -+ - name: ""setup node"" -+ uses: ""actions/setup-node@v4"" -+ with: -+ node-version: 20 -+ - name: ""auth gcs"" -+ uses: ""google-github-actions/auth@v2"" -+ with: -+ credentials_json: ""${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"" -+ - name: ""Set up QEMU"" -+ uses: ""docker/setup-qemu-action@v3"" -+ - name: ""set up docker buildx"" -+ uses: ""docker/setup-buildx-action@v3"" -+ - id: ""platform"" -+ name: ""parse image platform"" -+ run: | -+ mkdir -p images -+ -+ platform=""$(echo ""${{ matrix.platform}}"" | sed ""s/\(.*\)\/\(.*\)/\1-\2/"")"" -+ echo ""platform=${platform}"" >> $GITHUB_OUTPUT -+ echo ""platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)"" >> $GITHUB_OUTPUT -+ if [[ ""${platform}"" == ""linux/arm64"" ]]; then -+ echo ""plugin_arch=-arm64"" >> $GITHUB_OUTPUT -+ else -+ echo ""plugin_arch="" >> $GITHUB_OUTPUT -+ fi -+ working-directory: ""release"" -+ - env: -+ IMAGE_TAG: ""${{ needs.version.outputs.version }}"" -+ if: ""${{ fromJSON(needs.version.outputs.pr_created) }}"" -+ name: ""Build and export"" -+ timeout-minutes: ""${{ fromJSON(env.BUILD_TIMEOUT) }}"" -+ uses: ""docker/build-push-action@v6"" -+ with: -+ build-args: ""IMAGE_TAG=${{ needs.version.outputs.version }},GOARCH=${{ steps.platform.outputs.platform_short }}"" -+ context: ""release"" -+ file: ""release/clients/cmd/docker-driver/Dockerfile"" -+ outputs: ""type=docker,dest=release/images/grafana/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ platforms: ""${{ matrix.platform }}"" -+ push: false -+ tags: ""${{ env.IMAGE_PREFIX }}/grafana/loki-docker-driver:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"" -+ - env: -+ BUILD_DIR: ""release/clients/cmd/docker-driver"" -+ IMAGE_TAG: ""${{ needs.version.outputs.version }}"" -+ if: ""${{ fromJSON(needs.version.outputs.pr_created) }}"" -+ name: ""Package as Docker plugin"" -+ run: | -+ rm -rf ""${{ env.BUILD_DIR }}/rootfs"" || true -+ mkdir ""${{ env.BUILD_DIR }}/rootfs"" -+ tar -x -C ""${{ env.BUILD_DIR }}/rootfs"" -f ""release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ docker plugin create ""${{ env.IMAGE_TAG }}${{ steps.platform.outputs.plugin_arch }}"" ""${{ env.BUILD_DIR }}"" -+ working-directory: ""release"" -+ - if: ""${{ fromJSON(needs.version.outputs.pr_created) }}"" -+ name: ""upload artifacts"" -+ uses: ""google-github-actions/upload-cloud-storage@v2"" -+ with: -+ destination: ""${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"" -+ path: ""release/images/grafana/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"" -+ process_gcloudignore: false -+ strategy: -+ fail-fast: true -+ matrix: -+ platform: -+ - ""linux/amd64"" -+ - ""linux/arm64"" - promtail: - needs: - - ""version"" -diff --git a/Makefile b/Makefile -index c5afc7b669d64..2942a2409d13a 100644 ---- a/Makefile -+++ b/Makefile -@@ -458,13 +458,17 @@ endif - 
LOKI_DOCKER_DRIVER ?= ""grafana/loki-docker-driver"" - PLUGIN_TAG ?= $(IMAGE_TAG) - PLUGIN_ARCH ?= -+PLUGIN_BUILD_ARGS ?= -+ifeq (""$(PLUGIN_ARCH)"", ""-arm64"") -+ PLUGIN_BUILD_ARGS = --build-arg GOARCH=arm64 -+endif - - # build-rootfs - # builds the plugin rootfs - define build-rootfs - rm -rf clients/cmd/docker-driver/rootfs || true - mkdir clients/cmd/docker-driver/rootfs -- docker build --build-arg $(BUILD_IMAGE) -t rootfsimage -f clients/cmd/docker-driver/Dockerfile . -+ docker build $(PLUGIN_BUILD_ARGS) --build-arg $(BUILD_IMAGE) -t rootfsimage -f clients/cmd/docker-driver/Dockerfile . - - ID=$$(docker create rootfsimage true) && \ - (docker export $$ID | tar -x -C clients/cmd/docker-driver/rootfs) && \ -@@ -481,7 +485,7 @@ docker-driver: docker-driver-clean ## build the docker-driver executable - docker plugin create $(LOKI_DOCKER_DRIVER):main$(PLUGIN_ARCH) clients/cmd/docker-driver - - clients/cmd/docker-driver/docker-driver: -- CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D) -+ CGO_ENABLED=0 GOARCH=$(GOARCH) go build $(GO_FLAGS) -o $@ ./$(@D) - - docker-driver-push: docker-driver - ifndef DOCKER_PASSWORD -diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile -index 672556240f19b..b2a2ba8069857 100644 ---- a/clients/cmd/docker-driver/Dockerfile -+++ b/clients/cmd/docker-driver/Dockerfile -@@ -1,16 +1,29 @@ - ARG BUILD_IMAGE=grafana/loki-build-image:0.34.0 -+ARG GOARCH=amd64 - # Directories in this file are referenced from the root of the project not this folder - # This file is intended to be called from the root like so: --# docker build -t grafana/loki -f cmd/loki/Dockerfile . -+# docker build -t grafana/loki-docker-driver -f clients/cmd/docker-driver/Dockerfile . - --# TODO: add cross-platform support - FROM $BUILD_IMAGE AS build - COPY . /src/loki - WORKDIR /src/loki --RUN make clean && make BUILD_IN_CONTAINER=false clients/cmd/docker-driver/docker-driver - --FROM alpine:3.20.3 --RUN apk add --update --no-cache ca-certificates tzdata -+ARG GOARCH -+RUN make clean && make BUILD_IN_CONTAINER=false GOARCH=${GOARCH} clients/cmd/docker-driver/docker-driver -+ -+FROM alpine:3.20.3 AS temp -+ -+ARG GOARCH -+ -+RUN apk add --update --no-cache --arch=${GOARCH} ca-certificates tzdata -+ -+FROM --platform=linux/${GOARCH} alpine:3.20.3 -+ -+COPY --from=temp /etc/ca-certificates.conf /etc/ca-certificates.conf -+COPY --from=temp /usr/share/ca-certificates /usr/share/ca-certificates -+COPY --from=temp /usr/share/zoneinfo /usr/share/zoneinfo -+ - COPY --from=build /src/loki/clients/cmd/docker-driver/docker-driver /bin/docker-driver -+ - WORKDIR /bin/ - ENTRYPOINT [ ""/bin/docker-driver"" ] -diff --git a/docs/sources/send-data/docker-driver/_index.md b/docs/sources/send-data/docker-driver/_index.md -index 092857732c807..5fdb9c208eecd 100644 ---- a/docs/sources/send-data/docker-driver/_index.md -+++ b/docs/sources/send-data/docker-driver/_index.md -@@ -31,6 +31,9 @@ Run the following command to install the plugin, updating the release version if - ```bash - docker plugin install grafana/loki-docker-driver:2.9.2 --alias loki --grant-all-permissions - ``` -+{{% admonition type=""note"" %}} -+Add `-arm64` to the image tag for AMR64 hosts. -+{{% /admonition %}} - - To check installed plugins, use the `docker plugin ls` command. 
- Plugins that have started successfully are listed as enabled:",feat,"build the Docker Driver for arm64 (#9247) - -Co-authored-by: Trevor Whitney " -9b47b19f1822736d93bce760bcffa6812bfe6def,2025-02-17 16:45:04,renovate[bot],"fix(deps): update dependency @radix-ui/react-toast to v1.2.6 (main) (#16320) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json -index da60c332c91a9..960cde29df752 100644 ---- a/pkg/ui/frontend/package-lock.json -+++ b/pkg/ui/frontend/package-lock.json -@@ -2906,23 +2906,146 @@ - } - }, - ""node_modules/@radix-ui/react-toast"": { -- ""version"": ""1.2.5"", -- ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.5.tgz"", -- ""integrity"": ""sha512-ZzUsAaOx8NdXZZKcFNDhbSlbsCUy8qQWmzTdgrlrhhZAOx2ofLtKrBDW9fkqhFvXgmtv560Uj16pkLkqML7SHA=="", -+ ""version"": ""1.2.6"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.6.tgz"", -+ ""integrity"": ""sha512-gN4dpuIVKEgpLn1z5FhzT9mYRUitbfZq9XqN/7kkBMUgFTzTG8x/KszWJugJXHcwxckY8xcKDZPz7kG3o6DsUA=="", - ""license"": ""MIT"", - ""dependencies"": { - ""@radix-ui/primitive"": ""1.1.1"", -- ""@radix-ui/react-collection"": ""1.1.1"", -+ ""@radix-ui/react-collection"": ""1.1.2"", - ""@radix-ui/react-compose-refs"": ""1.1.1"", - ""@radix-ui/react-context"": ""1.1.1"", -- ""@radix-ui/react-dismissable-layer"": ""1.1.4"", -- ""@radix-ui/react-portal"": ""1.1.3"", -+ ""@radix-ui/react-dismissable-layer"": ""1.1.5"", -+ ""@radix-ui/react-portal"": ""1.1.4"", - ""@radix-ui/react-presence"": ""1.1.2"", -- ""@radix-ui/react-primitive"": ""2.0.1"", -+ ""@radix-ui/react-primitive"": ""2.0.2"", - ""@radix-ui/react-use-callback-ref"": ""1.1.0"", - ""@radix-ui/react-use-controllable-state"": ""1.1.0"", - ""@radix-ui/react-use-layout-effect"": ""1.1.0"", -- ""@radix-ui/react-visually-hidden"": ""1.1.1"" -+ ""@radix-ui/react-visually-hidden"": ""1.1.2"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-collection"": { -+ ""version"": ""1.1.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.2.tgz"", -+ ""integrity"": ""sha512-9z54IEKRxIa9VityapoEYMuByaG42iSy1ZXlY2KcuLSEtq8x4987/N6m15ppoMffgZX72gER2uHe1D9Y6Unlcw=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-compose-refs"": ""1.1.1"", -+ ""@radix-ui/react-context"": ""1.1.1"", -+ ""@radix-ui/react-primitive"": ""2.0.2"", -+ ""@radix-ui/react-slot"": ""1.1.2"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-dismissable-layer"": { -+ ""version"": ""1.1.5"", -+ ""resolved"": 
""https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.5.tgz"", -+ ""integrity"": ""sha512-E4TywXY6UsXNRhFrECa5HAvE5/4BFcGyfTyK36gP+pAW1ed7UTK4vKwdr53gAJYwqbfCWC6ATvJa3J3R/9+Qrg=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/primitive"": ""1.1.1"", -+ ""@radix-ui/react-compose-refs"": ""1.1.1"", -+ ""@radix-ui/react-primitive"": ""2.0.2"", -+ ""@radix-ui/react-use-callback-ref"": ""1.1.0"", -+ ""@radix-ui/react-use-escape-keydown"": ""1.1.0"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-portal"": { -+ ""version"": ""1.1.4"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.4.tgz"", -+ ""integrity"": ""sha512-sn2O9k1rPFYVyKd5LAJfo96JlSGVFpa1fS6UuBJfrZadudiw5tAmru+n1x7aMRQ84qDM71Zh1+SzK5QwU0tJfA=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-primitive"": ""2.0.2"", -+ ""@radix-ui/react-use-layout-effect"": ""1.1.0"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-primitive"": { -+ ""version"": ""2.0.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.2.tgz"", -+ ""integrity"": ""sha512-Ec/0d38EIuvDF+GZjcMU/Ze6MxntVJYO/fRlCPhCaVUyPY9WTalHJw54tp9sXeJo3tlShWpy41vQRgLRGOuz+w=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-slot"": ""1.1.2"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-toast/node_modules/@radix-ui/react-visually-hidden"": { -+ ""version"": ""1.1.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.1.2.tgz"", -+ ""integrity"": ""sha512-1SzA4ns2M1aRlvxErqhLHsBHoS5eI5UUcI2awAMgGUp4LoaoWOKYmvqDY2s/tltuPkh3Yk77YF/r3IRj+Amx4Q=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-primitive"": ""2.0.2"" - }, - ""peerDependencies"": { - ""@types/react"": ""*"",",fix,"update dependency @radix-ui/react-toast to v1.2.6 (main) (#16320) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -bdcb69540f66e59e0bdebe1650a3cd0685db96af,2024-01-17 12:58:46,Christian Haudum,"Bloom gateway: Add metrics for store operations and chunk ref counts (#11677) - -For better observability of the bloom gateway, this PR adds two -additional metrics that expose the amount of chunk refs pre and post -filtering. 
This can be used to calculate the filter ratio of the -gateways. - -The PR also adds a metric that observes the latency of the actual -processing time of bloom filters within the worker. - ---------- - -Signed-off-by: Christian Haudum ",False,"diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go -index 1f7dc836b5ff6..0c05d13d8ef3e 100644 ---- a/integration/loki_micro_services_test.go -+++ b/integration/loki_micro_services_test.go -@@ -16,6 +16,7 @@ import ( - ""github.com/prometheus/prometheus/model/labels"" - ""github.com/stretchr/testify/assert"" - ""github.com/stretchr/testify/require"" -+ ""golang.org/x/exp/slices"" - ""google.golang.org/protobuf/proto"" - - ""github.com/grafana/loki/integration/client"" -@@ -1061,7 +1062,7 @@ func TestCategorizedLabels(t *testing.T) { - - func TestBloomFiltersEndToEnd(t *testing.T) { - commonFlags := []string{ -- ""-bloom-compactor.compaction-interval=2s"", -+ ""-bloom-compactor.compaction-interval=10s"", - ""-bloom-compactor.enable-compaction=true"", - ""-bloom-compactor.enabled=true"", - ""-bloom-gateway.enable-filtering=true"", -@@ -1101,7 +1102,7 @@ func TestBloomFiltersEndToEnd(t *testing.T) { - ""-target=index-gateway"", - )..., - ) -- _ = clu.AddComponent( -+ tBloomGateway = clu.AddComponent( - ""bloom-gateway"", - append( - commonFlags, -@@ -1136,7 +1137,7 @@ func TestBloomFiltersEndToEnd(t *testing.T) { - ""-tsdb.shipper.index-gateway-client.server-address=""+tIndexGateway.GRPCURL(), - )..., - ) -- _ = clu.AddComponent( -+ tBloomCompactor = clu.AddComponent( - ""bloom-compactor"", - append( - commonFlags, -@@ -1186,6 +1187,12 @@ func TestBloomFiltersEndToEnd(t *testing.T) { - cliIndexGateway := client.New(tenantID, """", tIndexGateway.HTTPURL()) - cliIndexGateway.Now = now - -+ cliBloomGateway := client.New(tenantID, """", tBloomGateway.HTTPURL()) -+ cliBloomGateway.Now = now -+ -+ cliBloomCompactor := client.New(tenantID, """", tBloomCompactor.HTTPURL()) -+ cliBloomCompactor.Now = now -+ - lineTpl := `caller=loki_micro_services_test.go msg=""push log line"" id=""%s""` - // ingest logs from 10 different pods - // each line contains a random, unique string -@@ -1206,7 +1213,14 @@ func TestBloomFiltersEndToEnd(t *testing.T) { - require.NoError(t, tIngester.Restart()) - - // wait for compactor to compact index and for bloom compactor to build bloom filters -- time.Sleep(10 * time.Second) -+ require.Eventually(t, func() bool { -+ // verify metrics that observe usage of block for filtering -+ metrics, err := cliBloomCompactor.Metrics() -+ require.NoError(t, err) -+ successfulRunCount := getMetricValue(t, ""loki_bloomcompactor_runs_completed_total"", metrics) -+ t.Log(""successful bloom compactor runs"", successfulRunCount) -+ return successfulRunCount == 1 -+ }, 30*time.Second, time.Second) - - // use bloom gateway to perform needle in the haystack queries - randIdx := rand.Intn(len(uniqueStrings)) -@@ -1221,22 +1235,44 @@ func TestBloomFiltersEndToEnd(t *testing.T) { - expectedLine := fmt.Sprintf(lineTpl, uniqueStrings[randIdx]) - require.Equal(t, expectedLine, resp.Data.Stream[0].Values[0][1]) - -- // TODO(chaudum): -- // verify that bloom blocks have actually been used for querying -- // atm, we can only verify by logs, so we should add appropriate metrics for -- // uploaded/downloaded blocks and metas -+ // verify metrics that observe usage of block for filtering -+ bloomGwMetrics, err := cliBloomGateway.Metrics() -+ require.NoError(t, err) -+ -+ unfilteredCount := getMetricValue(t, 
""loki_bloom_gateway_chunkrefs_pre_filtering"", bloomGwMetrics) -+ require.Equal(t, float64(10), unfilteredCount) -+ -+ filteredCount := getMetricValue(t, ""loki_bloom_gateway_chunkrefs_post_filtering"", bloomGwMetrics) -+ require.Equal(t, float64(1), filteredCount) -+ -+ mf, err := extractMetricFamily(""loki_bloom_gateway_bloom_query_latency"", bloomGwMetrics) -+ require.NoError(t, err) -+ -+ count := getValueFromMetricFamilyWithFunc(mf, &dto.LabelPair{ -+ Name: proto.String(""status""), -+ Value: proto.String(""success""), -+ }, func(m *dto.Metric) uint64 { -+ return m.Histogram.GetSampleCount() -+ }) -+ require.Equal(t, uint64(1), count) - } - - func getValueFromMF(mf *dto.MetricFamily, lbs []*dto.LabelPair) float64 { -+ return getValueFromMetricFamilyWithFunc(mf, lbs[0], func(m *dto.Metric) float64 { return m.Counter.GetValue() }) -+} -+ -+func getValueFromMetricFamilyWithFunc[R any](mf *dto.MetricFamily, lbs *dto.LabelPair, f func(*dto.Metric) R) R { -+ eq := func(e *dto.LabelPair) bool { -+ return e.GetName() == lbs.GetName() && e.GetValue() == lbs.GetValue() -+ } -+ var zero R - for _, m := range mf.Metric { -- if !assert.ObjectsAreEqualValues(lbs, m.GetLabel()) { -+ if !slices.ContainsFunc(m.GetLabel(), eq) { - continue - } -- -- return m.Counter.GetValue() -+ return f(m) - } -- -- return 0 -+ return zero - } - - func assertCacheState(t *testing.T, metrics string, e *expectedCacheState) { -diff --git a/integration/parse_metrics.go b/integration/parse_metrics.go -index 46ea424978562..9f2bf5fc8fc26 100644 ---- a/integration/parse_metrics.go -+++ b/integration/parse_metrics.go -@@ -13,16 +13,24 @@ var ( - ErrInvalidMetricType = fmt.Errorf(""invalid metric type"") - ) - --func extractMetric(metricName, metrics string) (float64, map[string]string, error) { -+func extractMetricFamily(name, metrics string) (*io_prometheus_client.MetricFamily, error) { - var parser expfmt.TextParser - mfs, err := parser.TextToMetricFamilies(strings.NewReader(metrics)) - if err != nil { -- return 0, nil, err -+ return nil, err -+ } -+ -+ mf, ok := mfs[name] -+ if !ok { -+ return nil, ErrNoMetricFound - } -+ return mf, nil -+} - -- mf, found := mfs[metricName] -- if !found { -- return 0, nil, ErrNoMetricFound -+func extractMetric(metricName, metrics string) (float64, map[string]string, error) { -+ mf, err := extractMetricFamily(metricName, metrics) -+ if err != nil { -+ return 0, nil, err - } - - var val float64 -diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go -index b0c3251a0843d..766c05bab457c 100644 ---- a/pkg/bloomgateway/bloomgateway.go -+++ b/pkg/bloomgateway/bloomgateway.go -@@ -80,8 +80,10 @@ var ( - ) - - type metrics struct { -- queueDuration prometheus.Histogram -- inflightRequests prometheus.Summary -+ queueDuration prometheus.Histogram -+ inflightRequests prometheus.Summary -+ chunkRefsUnfiltered prometheus.Counter -+ chunkRefsFiltered prometheus.Counter - } - - func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) *metrics { -@@ -102,9 +104,29 @@ func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) * - MaxAge: time.Minute, - AgeBuckets: 6, - }), -+ chunkRefsUnfiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ -+ Namespace: namespace, -+ Subsystem: subsystem, -+ Name: ""chunkrefs_pre_filtering"", -+ Help: ""Total amount of chunk refs pre filtering. 
Does not count chunk refs in failed requests."", -+ }), -+ chunkRefsFiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ -+ Namespace: namespace, -+ Subsystem: subsystem, -+ Name: ""chunkrefs_post_filtering"", -+ Help: ""Total amount of chunk refs post filtering."", -+ }), - } - } - -+func (m *metrics) addUnfilteredCount(n int) { -+ m.chunkRefsUnfiltered.Add(float64(n)) -+} -+ -+func (m *metrics) addFilteredCount(n int) { -+ m.chunkRefsFiltered.Add(float64(n)) -+} -+ - // SyncMap is a map structure which can be synchronized using the RWMutex - type SyncMap[k comparable, v any] struct { - sync.RWMutex -@@ -284,8 +306,12 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk - return nil, err - } - -+ numChunksUnfiltered := len(req.Refs) -+ - // Shortcut if request does not contain filters - if len(req.Filters) == 0 { -+ g.metrics.addUnfilteredCount(numChunksUnfiltered) -+ g.metrics.addFilteredCount(len(req.Refs)) - return &logproto.FilterChunkRefResponse{ - ChunkRefs: req.Refs, - }, nil -@@ -313,6 +339,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk - responses := responsesPool.Get(requestCount) - defer responsesPool.Put(responses) - -+outer: - for { - select { - case <-ctx.Done(): -@@ -325,17 +352,24 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk - level.Debug(g.logger).Log(""msg"", ""got partial result"", ""task"", task.ID, ""tenant"", tenantID, ""fp_int"", uint64(res.Fp), ""fp_hex"", res.Fp, ""chunks_to_remove"", res.Removals.Len(), ""progress"", fmt.Sprintf(""%d/%d"", len(responses), requestCount)) - // wait for all parts of the full response - if len(responses) == requestCount { -- for _, o := range responses { -- if res.Removals.Len() == 0 { -- continue -- } -- // we must not remove items from req.Refs as long as the worker may iterater over them -- g.removeNotMatchingChunks(req, o) -- } -- return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil -+ break outer - } - } - } -+ -+ for _, o := range responses { -+ if o.Removals.Len() == 0 { -+ continue -+ } -+ // we must not remove items from req.Refs as long as the worker may iterater over them -+ g.removeNotMatchingChunks(req, o) -+ } -+ -+ g.metrics.addUnfilteredCount(numChunksUnfiltered) -+ g.metrics.addFilteredCount(len(req.Refs)) -+ -+ level.Debug(g.logger).Log(""msg"", ""return filtered chunk refs"", ""unfiltered"", numChunksUnfiltered, ""filtered"", len(req.Refs)) -+ return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil - } - - func (g *Gateway) removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output) { -diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go -index a8f9c56d50bab..ce5add3c63f3d 100644 ---- a/pkg/bloomgateway/worker.go -+++ b/pkg/bloomgateway/worker.go -@@ -27,6 +27,7 @@ type workerMetrics struct { - dequeueErrors *prometheus.CounterVec - dequeueWaitTime *prometheus.SummaryVec - storeAccessLatency *prometheus.HistogramVec -+ bloomQueryLatency *prometheus.HistogramVec - } - - func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem string) *workerMetrics { -@@ -50,6 +51,13 @@ func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem str - Name: ""dequeue_wait_time"", - Help: ""Time spent waiting for dequeuing tasks from queue"", - }, labels), -+ bloomQueryLatency: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ -+ Namespace: namespace, -+ Subsystem: subsystem, -+ Name: 
""bloom_query_latency"", -+ Help: ""Latency in seconds of processing bloom blocks"", -+ }, append(labels, ""status"")), -+ // TODO(chaudum): Move this metric into the bloomshipper - storeAccessLatency: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, -@@ -213,29 +221,32 @@ func (w *worker) processBlocksWithCallback(taskCtx context.Context, tenant strin - return w.store.ForEach(taskCtx, tenant, blockRefs, func(bq *v1.BlockQuerier, minFp, maxFp uint64) error { - for _, b := range boundedRefs { - if b.blockRef.MinFingerprint == minFp && b.blockRef.MaxFingerprint == maxFp { -- processBlock(bq, day, b.tasks) -- return nil -+ return w.processBlock(bq, day, b.tasks) - } - } - return nil - }) - } - --func processBlock(blockQuerier *v1.BlockQuerier, day time.Time, tasks []Task) { -+func (w *worker) processBlock(blockQuerier *v1.BlockQuerier, day time.Time, tasks []Task) error { - schema, err := blockQuerier.Schema() - if err != nil { -- for _, t := range tasks { -- t.ErrCh <- errors.Wrap(err, ""failed to get block schema"") -- } -+ return err - } - - tokenizer := v1.NewNGramTokenizer(schema.NGramLen(), 0) - it := newTaskMergeIterator(day, tokenizer, tasks...) - fq := blockQuerier.Fuse([]v1.PeekingIterator[v1.Request]{it}) -+ -+ start := time.Now() - err = fq.Run() -+ duration := time.Since(start).Seconds() -+ - if err != nil { -- for _, t := range tasks { -- t.ErrCh <- errors.Wrap(err, ""failed to run chunk check"") -- } -+ w.metrics.bloomQueryLatency.WithLabelValues(w.id, ""failure"").Observe(duration) -+ return err - } -+ -+ w.metrics.bloomQueryLatency.WithLabelValues(w.id, ""success"").Observe(duration) -+ return nil - }",unknown,"Bloom gateway: Add metrics for store operations and chunk ref counts (#11677) - -For better observability of the bloom gateway, this PR adds two -additional metrics that expose the amount of chunk refs pre and post -filtering. This can be used to calculate the filter ratio of the -gateways. - -The PR also adds a metric that observes the latency of the actual -processing time of bloom filters within the worker. - ---------- - -Signed-off-by: Christian Haudum " -7e658e8766ee8601c0474b5644b6c87ca72433f5,2021-06-10 12:39:00,Cyril Tovena,"Fixes a flaky retention test. (#3833) - -This was due because the condition was using a string array that may be not in correct order. - -Signed-off-by: Cyril Tovena ",False,"diff --git a/pkg/storage/stores/shipper/compactor/retention/retention_test.go b/pkg/storage/stores/shipper/compactor/retention/retention_test.go -index 3f7e28906cc1c..4799c6b280796 100644 ---- a/pkg/storage/stores/shipper/compactor/retention/retention_test.go -+++ b/pkg/storage/stores/shipper/compactor/retention/retention_test.go -@@ -5,6 +5,7 @@ import ( - ""crypto/sha256"" - ""encoding/base64"" - ""path/filepath"" -+ ""sort"" - ""strconv"" - ""strings"" - ""sync"" -@@ -156,10 +157,13 @@ func Test_Retention(t *testing.T) { - expectDeleted = append(expectDeleted, tt.chunks[i].ExternalKey()) - } - } -+ sort.Strings(expectDeleted) - store.Stop() - if len(expectDeleted) != 0 { - require.Eventually(t, func() bool { -- return assert.ObjectsAreEqual(expectDeleted, chunkClient.getDeletedChunkIds()) -+ actual := chunkClient.getDeletedChunkIds() -+ sort.Strings(actual) -+ return assert.ObjectsAreEqual(expectDeleted, actual) - }, 10*time.Second, 1*time.Second) - } - })",unknown,"Fixes a flaky retention test. (#3833) - -This was due because the condition was using a string array that may be not in correct order. 
- -Signed-off-by: Cyril Tovena " -e9446a93dea5a5f853d756c4255997992177c8f3,2024-01-04 01:17:01,Karsten Jeschkies,Run checks in GitHub actions. (#11475),False,"diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml -new file mode 100644 -index 0000000000000..987df91c8c3e3 ---- /dev/null -+++ b/.github/workflows/checks.yml -@@ -0,0 +1,20 @@ -+name: Checks -+on: [push] -+jobs: -+ checks: -+ runs-on: ubuntu-latest -+ env: -+ BUILD_IN_CONTAINER: false -+ container: -+ image: grafana/loki-build-image:0.32.0 -+ steps: -+ - uses: actions/checkout@v4 -+ - run: git config --global --add safe.directory ""$GITHUB_WORKSPACE"" -+ - run: make lint -+ - run: make check-doc -+ - run: make check-mod -+ - run: make validate-example-configs -+ - run: make check-example-config-doc -+ - run: make check-drone-drift -+ - run: make check-generated-files -+ - run: make test",unknown,Run checks in GitHub actions. (#11475) -69c1d1c0e061defee9397f905172b7d66f22691f,2020-05-06 01:34:45,avii-ridge,"Add extraOutputs variable to support multiple outputs for fluent-bit (#2040) - -* Adding support for extra outputs - -* added documentation",False,"diff --git a/production/helm/fluent-bit/Chart.yaml b/production/helm/fluent-bit/Chart.yaml -index 89f850d1d225f..ebad2d67bb005 100644 ---- a/production/helm/fluent-bit/Chart.yaml -+++ b/production/helm/fluent-bit/Chart.yaml -@@ -1,6 +1,6 @@ - apiVersion: ""v1"" - name: fluent-bit --version: 0.1.1 -+version: 0.1.2 - appVersion: v1.4.1 - kubeVersion: ""^1.10.0-0"" - description: ""Uses fluent-bit Loki go plugin for gathering logs and sending them to Loki"" -diff --git a/production/helm/fluent-bit/README.md b/production/helm/fluent-bit/README.md -index ae6562737e3c8..df26abbc5892a 100644 ---- a/production/helm/fluent-bit/README.md -+++ b/production/helm/fluent-bit/README.md -@@ -84,6 +84,7 @@ For more details, read the [Fluent Bit documentation](../../../cmd/fluent-bit/RE - | `config.labels` | A set of labels to send for every log | `'{job=""fluent-bit""}'` | - | `config.labelMap` | Mapping of labels from a record. See [Fluent Bit documentation](../../../cmd/fluent-bit/README.md) | | - | `config.parsers` | Definition of extras fluent bit parsers. See [Official Fluent Bit documentation](https://docs.fluentbit.io/manual/filter/parser). The format is a sequence of mappings where each key is the same as the one in the [PARSER] section of parsers.conf file | `[]` | -+| `config.extraOutputs` | Definition of extras fluent bit outputs. See [Official Fluent Bit documentation](https://docs.fluentbit.io/manual/pipeline/outputs/). The format is a sequence of mappings where each key is the same as the one in the [OUTPUT] | `[]` | - | `affinity` | [affinity][affinity] settings for pod assignment | `{}` | - | `annotations` | Annotations to add to Kubernetes resources. 
| `{}` | - | `deploymentStrategy` | The deployment strategy to use with the daemonset | `RollingUpdate` | -diff --git a/production/helm/fluent-bit/templates/configmap.yaml b/production/helm/fluent-bit/templates/configmap.yaml -index 85f82e0e43b53..6b86b51289584 100644 ---- a/production/helm/fluent-bit/templates/configmap.yaml -+++ b/production/helm/fluent-bit/templates/configmap.yaml -@@ -44,7 +44,12 @@ data: - LabelMapPath /fluent-bit/etc/labelmap.json - LineFormat {{ .Values.config.lineFormat }} - LogLevel {{ .Values.config.loglevel }} -- -+ {{- range $extraOutput := .Values.config.extraOutputs }} -+ [OUTPUT] -+ {{- range $key,$value := $extraOutput }} -+ {{ $key }} {{ $value }} -+ {{- end }} -+ {{- end }} - parsers.conf: |- - [PARSER] - Name docker -diff --git a/production/helm/fluent-bit/values.yaml b/production/helm/fluent-bit/values.yaml -index d9434f25cc663..b47c6decdb8b5 100644 ---- a/production/helm/fluent-bit/values.yaml -+++ b/production/helm/fluent-bit/values.yaml -@@ -31,6 +31,10 @@ config: - # Time_Key: time - # Time_Format: ""%d/%b/%Y:%H:%M:%S %z"" - -+ # extraOutputs: # Allow to define extra outputs in addition to the one automatically created -+ # - Name: stdout -+ # Format: json -+ # json_date_format: time - - affinity: {}",unknown,"Add extraOutputs variable to support multiple outputs for fluent-bit (#2040) - -* Adding support for extra outputs - -* added documentation" -a8a4ba742d229daad517f745660ed82b6d0b048c,2023-01-16 17:01:38,dependabot[bot],"Bump github.com/Masterminds/sprig/v3 from 3.2.2 to 3.2.3 (#8159) - -Bumps -[github.com/Masterminds/sprig/v3](https://github.com/Masterminds/sprig) -from 3.2.2 to 3.2.3. -
-Release notes -

Sourced from github.com/Masterminds/sprig/v3's -releases.

-
-

v3.2.3

-

Changed

- -
-
-
-Changelog -

Sourced from github.com/Masterminds/sprig/v3's -changelog.

-
-

Release 3.2.3 (2022-11-29)

-

Changed

- -
-
-
-Commits -
    -
  • 581758e -Updating the changelog for the 3.2.3 release
  • -
  • 5787448 -Updating changelog for 3.2.2 release
  • -
  • 8489c3e -Merge pull request #354 -from mattfarina/bump-crypto-v0.3.0
  • -
  • 42ac6ac -Updating crypto library
  • -
  • d65147b -Merge pull request #353 -from mattfarina/bump-semver-3.2.0
  • -
  • 92ac1ae -Updating semver package
  • -
  • ce20d69 -Merge pull request #313 -from book987/master
  • -
  • f9a478a -Merge pull request #334 -from aJetHorn/patch-1
  • -
  • 58a4f65 -Merge pull request #349 -from mattfarina/bump-go-19
  • -
  • 32424cc -Merge pull request #347 -from neelayu/patch-1
  • -
  • Additional commits viewable in compare -view
  • -
-
-
- - -[![Dependabot compatibility -score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/Masterminds/sprig/v3&package-manager=go_modules&previous-version=3.2.2&new-version=3.2.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) - -Dependabot will resolve any conflicts with this PR as long as you don't -alter it yourself. You can also trigger a rebase manually by commenting -`@dependabot rebase`. - -[//]: # (dependabot-automerge-start) -[//]: # (dependabot-automerge-end) - ---- - -
-Dependabot commands and options -
- -You can trigger Dependabot actions by commenting on this PR: -- `@dependabot rebase` will rebase this PR -- `@dependabot recreate` will recreate this PR, overwriting any edits -that have been made to it -- `@dependabot merge` will merge this PR after your CI passes on it -- `@dependabot squash and merge` will squash and merge this PR after -your CI passes on it -- `@dependabot cancel merge` will cancel a previously requested merge -and block automerging -- `@dependabot reopen` will reopen this PR if it is closed -- `@dependabot close` will close this PR and stop Dependabot recreating -it. You can achieve the same result by closing it manually -- `@dependabot ignore this major version` will close this PR and stop -Dependabot creating any more for this major version (unless you reopen -the PR or upgrade to it yourself) -- `@dependabot ignore this minor version` will close this PR and stop -Dependabot creating any more for this minor version (unless you reopen -the PR or upgrade to it yourself) -- `@dependabot ignore this dependency` will close this PR and stop -Dependabot creating any more for this dependency (unless you reopen the -PR or upgrade to it yourself) - - -
- -Signed-off-by: dependabot[bot] -Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 8d7b0ee12320f..6f4b1005b0f9d 100644 ---- a/go.mod -+++ b/go.mod -@@ -10,7 +10,7 @@ require ( - github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/Azure/go-autorest/autorest/adal v0.9.21 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 -- github.com/Masterminds/sprig/v3 v3.2.2 -+ github.com/Masterminds/sprig/v3 v3.2.3 - github.com/NYTimes/gziphandler v1.1.1 - github.com/Shopify/sarama v1.30.0 - github.com/Workiva/go-datastructures v1.0.53 -@@ -141,7 +141,7 @@ require ( - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect -- github.com/Masterminds/semver/v3 v3.1.1 // indirect -+ github.com/Masterminds/semver/v3 v3.2.0 // indirect - github.com/Microsoft/go-winio v0.5.1 // indirect - github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect -@@ -221,7 +221,7 @@ require ( - github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/hashicorp/memberlist v0.5.0 // indirect - github.com/hashicorp/serf v0.9.7 // indirect -- github.com/huandu/xstrings v1.3.1 // indirect -+ github.com/huandu/xstrings v1.3.3 // indirect - github.com/jcmturner/aescts/v2 v2.0.0 // indirect - github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect -diff --git a/go.sum b/go.sum -index 23220e6be86ba..d21bc51d716ec 100644 ---- a/go.sum -+++ b/go.sum -@@ -146,10 +146,10 @@ github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e69 - github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= - github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= - github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= --github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= --github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= --github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= --github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= - github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= - github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= - github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -@@ -854,8 +854,8 @@ github.com/heroku/x v0.0.50 h1:CA0AXkSumucVJD+T+x+6c7X1iDEb+40F8GNgH5UjJwo= - github.com/heroku/x v0.0.50/go.mod h1:vr+jORZ6sG3wgEq2FAS6UbOUrz9/DxpQGN/xPHVgbSM= - github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= - github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
--github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= --github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -+github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= - github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= - github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= - github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -@@ -1509,7 +1509,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= --golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= - golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -@@ -1523,6 +1522,7 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= - golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= - golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= - golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= - golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= - golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -@@ -1646,6 +1646,7 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug - golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= - golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= - golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= - golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= - golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= - golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -@@ -1800,11 +1801,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc - golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -+golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= - golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= - golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= - golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= - golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= - golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml -index fdbdf1448c366..c87d1c4b90e86 100644 ---- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml -+++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml -@@ -4,23 +4,27 @@ run: - linters: - disable-all: true - enable: -+ - misspell -+ - structcheck -+ - govet -+ - staticcheck - - deadcode -- - dupl - - errcheck -- - gofmt -- - goimports -- - golint -- - gosimple -- - govet -+ - varcheck -+ - unparam - - ineffassign -- - misspell - - nakedret -- - structcheck -+ - gocyclo -+ - dupl -+ - goimports -+ - revive -+ - gosec -+ - gosimple -+ - typecheck - - unused -- - varcheck - - linters-settings: - gofmt: - simplify: true - dupl: -- threshold: 400 -+ threshold: 600 -diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md -index 1f90c38d260d0..f12626423a3e4 100644 ---- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md -+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md -@@ -1,5 +1,25 @@ - # Changelog - -+## 3.2.0 (2022-11-28) -+ -+### Added -+ -+- #190: Added text marshaling and unmarshaling -+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) -+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) -+- #179: Added New() version constructor (thanks @kazhuravlev) -+ -+### Changed -+ -+- #182/#183: Updated CI testing setup -+ -+### Fixed -+ -+- #186: Fixing issue where validation of constraint section gave false positives -+- #176: Fix constraints check with *-0 (thanks @mtt0) -+- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) -+- #161: Fixed godoc (thanks @afirth) -+ - ## 3.1.1 (2020-11-23) - - ### Fixed -diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go -index 547613f044f21..203072e464685 100644 ---- a/vendor/github.com/Masterminds/semver/v3/constraints.go -+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go -@@ -134,6 +134,23 @@ func (cs Constraints) String() string { - return strings.Join(buf, "" || "") - } - -+// UnmarshalText implements the encoding.TextUnmarshaler interface. -+func (cs *Constraints) UnmarshalText(text []byte) error { -+ temp, err := NewConstraint(string(text)) -+ if err != nil { -+ return err -+ } -+ -+ *cs = *temp -+ -+ return nil -+} -+ -+// MarshalText implements the encoding.TextMarshaler interface. 
-+func (cs Constraints) MarshalText() ([]byte, error) { -+ return []byte(cs.String()), nil -+} -+ - var constraintOps map[string]cfunc - var constraintRegex *regexp.Regexp - var constraintRangeRegex *regexp.Regexp -@@ -180,8 +197,13 @@ func init() { - ops, - cvRegex)) - -+ // The first time a constraint shows up will look slightly different from -+ // future times it shows up due to a leading space or comma in a given -+ // string. - validConstraintRegex = regexp.MustCompile(fmt.Sprintf( -- `^(\s*(%s)\s*(%s)\s*\,?)+$`, -+ `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, -+ ops, -+ cvRegex, - ops, - cvRegex)) - } -@@ -233,7 +255,7 @@ func parseConstraint(c string) (*constraint, error) { - patchDirty := false - dirty := false - if isX(m[3]) || m[3] == """" { -- ver = ""0.0.0"" -+ ver = fmt.Sprintf(""0.0.0%s"", m[6]) - dirty = true - } else if isX(strings.TrimPrefix(m[4], ""."")) || m[4] == """" { - minorDirty = true -@@ -534,6 +556,10 @@ func constraintCaret(v *Version, c *constraint) (bool, error) { - } - return false, fmt.Errorf(""%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0"", v, c.orig) - } -+ // ^ when the minor is 0 and minor > 0 is =0.0.z -+ if c.con.Minor() == 0 && v.Minor() > 0 { -+ return false, fmt.Errorf(""%s does not have same minor version as %s"", v, c.orig) -+ } - - // At this point the major is 0 and the minor is 0 and not dirty. The patch - // is not dirty so we need to check if they are equal. If they are not equal -diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go -index 391aa46b76df8..74f97caa57f80 100644 ---- a/vendor/github.com/Masterminds/semver/v3/doc.go -+++ b/vendor/github.com/Masterminds/semver/v3/doc.go -@@ -3,12 +3,12 @@ Package semver provides the ability to work with Semantic Versions (http://semve - - Specifically it provides the ability to: - -- * Parse semantic versions -- * Sort semantic versions -- * Check if a semantic version fits within a set of constraints -- * Optionally work with a `v` prefix -+ - Parse semantic versions -+ - Sort semantic versions -+ - Check if a semantic version fits within a set of constraints -+ - Optionally work with a `v` prefix - --Parsing Semantic Versions -+# Parsing Semantic Versions - - There are two functions that can parse semantic versions. The `StrictNewVersion` - function only parses valid version 2 semantic versions as outlined in the -@@ -21,48 +21,48 @@ that can be sorted, compared, and used in constraints. - When parsing a version an optional error can be returned if there is an issue - parsing the version. For example, - -- v, err := semver.NewVersion(""1.2.3-beta.1+b345"") -+ v, err := semver.NewVersion(""1.2.3-beta.1+b345"") - - The version object has methods to get the parts of the version, compare it to - other versions, convert the version back into a string, and get the original - string. For more details please see the documentation - at https://godoc.org/github.com/Masterminds/semver. - --Sorting Semantic Versions -+# Sorting Semantic Versions - - A set of versions can be sorted using the `sort` package from the standard library. 
- For example, - -- raw := []string{""1.2.3"", ""1.0"", ""1.3"", ""2"", ""0.4.2"",} -- vs := make([]*semver.Version, len(raw)) -- for i, r := range raw { -- v, err := semver.NewVersion(r) -- if err != nil { -- t.Errorf(""Error parsing version: %s"", err) -- } -+ raw := []string{""1.2.3"", ""1.0"", ""1.3"", ""2"", ""0.4.2"",} -+ vs := make([]*semver.Version, len(raw)) -+ for i, r := range raw { -+ v, err := semver.NewVersion(r) -+ if err != nil { -+ t.Errorf(""Error parsing version: %s"", err) -+ } - -- vs[i] = v -- } -+ vs[i] = v -+ } - -- sort.Sort(semver.Collection(vs)) -+ sort.Sort(semver.Collection(vs)) - --Checking Version Constraints and Comparing Versions -+# Checking Version Constraints and Comparing Versions - - There are two methods for comparing versions. One uses comparison methods on - `Version` instances and the other is using Constraints. There are some important - differences to notes between these two methods of comparison. - --1. When two versions are compared using functions such as `Compare`, `LessThan`, -- and others it will follow the specification and always include prereleases -- within the comparison. It will provide an answer valid with the comparison -- spec section at https://semver.org/#spec-item-11 --2. When constraint checking is used for checks or validation it will follow a -- different set of rules that are common for ranges with tools like npm/js -- and Rust/Cargo. This includes considering prereleases to be invalid if the -- ranges does not include on. If you want to have it include pre-releases a -- simple solution is to include `-0` in your range. --3. Constraint ranges can have some complex rules including the shorthard use of -- ~ and ^. For more details on those see the options below. -+ 1. When two versions are compared using functions such as `Compare`, `LessThan`, -+ and others it will follow the specification and always include prereleases -+ within the comparison. It will provide an answer valid with the comparison -+ spec section at https://semver.org/#spec-item-11 -+ 2. When constraint checking is used for checks or validation it will follow a -+ different set of rules that are common for ranges with tools like npm/js -+ and Rust/Cargo. This includes considering prereleases to be invalid if the -+ ranges does not include on. If you want to have it include pre-releases a -+ simple solution is to include `-0` in your range. -+ 3. Constraint ranges can have some complex rules including the shorthard use of -+ ~ and ^. For more details on those see the options below. - - There are differences between the two methods or checking versions because the - comparison methods on `Version` follow the specification while comparison ranges -@@ -76,19 +76,19 @@ patters with their versions. - Checking a version against version constraints is one of the most featureful - parts of the package. - -- c, err := semver.NewConstraint("">= 1.2.3"") -- if err != nil { -- // Handle constraint not being parsable. -- } -+ c, err := semver.NewConstraint("">= 1.2.3"") -+ if err != nil { -+ // Handle constraint not being parsable. -+ } - -- v, err := semver.NewVersion(""1.3"") -- if err != nil { -- // Handle version not being parsable. -- } -- // Check if the version meets the constraints. The a variable will be true. -- a := c.Check(v) -+ v, err := semver.NewVersion(""1.3"") -+ if err != nil { -+ // Handle version not being parsable. -+ } -+ // Check if the version meets the constraints. The a variable will be true. 
-+ a := c.Check(v) - --Basic Comparisons -+# Basic Comparisons - - There are two elements to the comparisons. First, a comparison string is a list - of comma or space separated AND comparisons. These are then separated by || (OR) -@@ -99,31 +99,31 @@ greater than or equal to 4.2.3. This can also be written as - - The basic comparisons are: - -- * `=`: equal (aliased to no operator) -- * `!=`: not equal -- * `>`: greater than -- * `<`: less than -- * `>=`: greater than or equal to -- * `<=`: less than or equal to -+ - `=`: equal (aliased to no operator) -+ - `!=`: not equal -+ - `>`: greater than -+ - `<`: less than -+ - `>=`: greater than or equal to -+ - `<=`: less than or equal to - --Hyphen Range Comparisons -+# Hyphen Range Comparisons - - There are multiple methods to handle ranges and the first is hyphens ranges. - These look like: - -- * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` -- * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` -+ - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` -+ - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` - --Wildcards In Comparisons -+# Wildcards In Comparisons - - The `x`, `X`, and `*` characters can be used as a wildcard character. This works - for all comparison operators. When used on the `=` operator it falls - back to the tilde operation. For example, - -- * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` -- * `>= 1.2.x` is equivalent to `>= 1.2.0` -- * `<= 2.x` is equivalent to `<= 3` -- * `*` is equivalent to `>= 0.0.0` -+ - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` -+ - `>= 1.2.x` is equivalent to `>= 1.2.0` -+ - `<= 2.x` is equivalent to `<= 3` -+ - `*` is equivalent to `>= 0.0.0` - - Tilde Range Comparisons (Patch) - -@@ -131,11 +131,11 @@ The tilde (`~`) comparison operator is for patch level ranges when a minor - version is specified and major level changes when the minor number is missing. - For example, - -- * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` -- * `~1` is equivalent to `>= 1, < 2` -- * `~2.3` is equivalent to `>= 2.3 < 2.4` -- * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` -- * `~1.x` is equivalent to `>= 1 < 2` -+ - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` -+ - `~1` is equivalent to `>= 1, < 2` -+ - `~2.3` is equivalent to `>= 2.3 < 2.4` -+ - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` -+ - `~1.x` is equivalent to `>= 1 < 2` - - Caret Range Comparisons (Major) - -@@ -144,41 +144,41 @@ The caret (`^`) comparison operator is for major level changes once a stable - as the API stability level. This is useful when comparisons of API versions as a - major change is API breaking. 
For example, - -- * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -- * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -- * `^2.3` is equivalent to `>= 2.3, < 3` -- * `^2.x` is equivalent to `>= 2.0.0, < 3` -- * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` -- * `^0.2` is equivalent to `>=0.2.0 <0.3.0` -- * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` -- * `^0.0` is equivalent to `>=0.0.0 <0.1.0` -- * `^0` is equivalent to `>=0.0.0 <1.0.0` -+ - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -+ - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -+ - `^2.3` is equivalent to `>= 2.3, < 3` -+ - `^2.x` is equivalent to `>= 2.0.0, < 3` -+ - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` -+ - `^0.2` is equivalent to `>=0.2.0 <0.3.0` -+ - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` -+ - `^0.0` is equivalent to `>=0.0.0 <0.1.0` -+ - `^0` is equivalent to `>=0.0.0 <1.0.0` - --Validation -+# Validation - - In addition to testing a version against a constraint, a version can be validated - against a constraint. When validation fails a slice of errors containing why a - version didn't meet the constraint is returned. For example, - -- c, err := semver.NewConstraint(""<= 1.2.3, >= 1.4"") -- if err != nil { -- // Handle constraint not being parseable. -- } -- -- v, _ := semver.NewVersion(""1.3"") -- if err != nil { -- // Handle version not being parseable. -- } -- -- // Validate a version against a constraint. -- a, msgs := c.Validate(v) -- // a is false -- for _, m := range msgs { -- fmt.Println(m) -- -- // Loops over the errors which would read -- // ""1.3 is greater than 1.2.3"" -- // ""1.3 is less than 1.4"" -- } -+ c, err := semver.NewConstraint(""<= 1.2.3, >= 1.4"") -+ if err != nil { -+ // Handle constraint not being parseable. -+ } -+ -+ v, _ := semver.NewVersion(""1.3"") -+ if err != nil { -+ // Handle version not being parseable. -+ } -+ -+ // Validate a version against a constraint. -+ a, msgs := c.Validate(v) -+ // a is false -+ for _, m := range msgs { -+ fmt.Println(m) -+ -+ // Loops over the errors which would read -+ // ""1.3 is greater than 1.2.3"" -+ // ""1.3 is less than 1.4"" -+ } - */ - package semver -diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go -index d6b9cda3eeb70..7c4bed33474cf 100644 ---- a/vendor/github.com/Masterminds/semver/v3/version.go -+++ b/vendor/github.com/Masterminds/semver/v3/version.go -@@ -55,14 +55,16 @@ func init() { - versionRegex = regexp.MustCompile(""^"" + semVerRegex + ""$"") - } - --const num string = ""0123456789"" --const allowed string = ""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"" + num -+const ( -+ num string = ""0123456789"" -+ allowed string = ""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"" + num -+) - - // StrictNewVersion parses a given version and returns an instance of Version or - // an error if unable to parse the version. Only parses valid semantic versions. - // Performs checking that can find errors within the version. --// If you want to coerce a version, such as 1 or 1.2, and perse that as the 1.x --// releases of semver provided use the NewSemver() function. -+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x -+// releases of semver did, use the NewVersion() function. - func StrictNewVersion(v string) (*Version, error) { - // Parsing here does not use RegEx in order to increase performance and reduce - // allocations. 
-@@ -207,6 +209,23 @@ func NewVersion(v string) (*Version, error) { - return sv, nil - } - -+// New creates a new instance of Version with each of the parts passed in as -+// arguments instead of parsing a version string. -+func New(major, minor, patch uint64, pre, metadata string) *Version { -+ v := Version{ -+ major: major, -+ minor: minor, -+ patch: patch, -+ pre: pre, -+ metadata: metadata, -+ original: """", -+ } -+ -+ v.original = v.String() -+ -+ return &v -+} -+ - // MustParse parses a given version and panics on error. - func MustParse(v string) *Version { - sv, err := NewVersion(v) -@@ -267,7 +286,6 @@ func (v Version) Metadata() string { - - // originalVPrefix returns the original 'v' prefix if any. - func (v Version) originalVPrefix() string { -- - // Note, only lowercase v is supported as a prefix by the parser. - if v.original != """" && v.original[:1] == ""v"" { - return v.original[:1] -@@ -436,6 +454,23 @@ func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) - } - -+// UnmarshalText implements the encoding.TextUnmarshaler interface. -+func (v *Version) UnmarshalText(text []byte) error { -+ temp, err := NewVersion(string(text)) -+ if err != nil { -+ return err -+ } -+ -+ *v = *temp -+ -+ return nil -+} -+ -+// MarshalText implements the encoding.TextMarshaler interface. -+func (v Version) MarshalText() ([]byte, error) { -+ return []byte(v.String()), nil -+} -+ - // Scan implements the SQL.Scanner interface. - func (v *Version) Scan(value interface{}) error { - var s string -@@ -470,7 +505,6 @@ func compareSegment(v, o uint64) int { - } - - func comparePrerelease(v, o string) int { -- - // split the prelease versions by their part. The separator, per the spec, - // is a . - sparts := strings.Split(v, ""."") -@@ -562,7 +596,6 @@ func comparePrePart(s, o string) int { - return 1 - } - return -1 -- - } - - // Like strings.ContainsAny but does an only instead of any. -diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md -index fcdd4e88aed41..2ce45dd4eca63 100644 ---- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md -+++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md -@@ -1,8 +1,21 @@ - # Changelog - -+## Release 3.2.3 (2022-11-29) -+ -+### Changed -+ -+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) -+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) -+- #353: Updated masterminds/semver which included bug fixes -+- #354: Updated golang.org/x/crypto which included bug fixes -+ -+## Release 3.2.2 (2021-02-04) -+ -+This is a re-release of 3.2.1 to satisfy something with the Go module system. -+ - ## Release 3.2.1 (2021-02-04) - --### Changed -+### Changed - - - Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) - -diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md -index c37ba01c2162b..3e22c60e1a01e 100644 ---- a/vendor/github.com/Masterminds/sprig/v3/README.md -+++ b/vendor/github.com/Masterminds/sprig/v3/README.md -@@ -17,10 +17,9 @@ JavaScript libraries, such as [underscore.js](http://underscorejs.org/). - ## IMPORTANT NOTES - - Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In --its v0.3.9 release there was a behavior change that impacts merging template --functions in sprig. 
It is currently recommended to use v0.3.8 of that package. --Using v0.3.9 will cause sprig tests to fail. The issue in mergo is tracked at --https://github.com/imdario/mergo/issues/139. -+its v0.3.9 release, there was a behavior change that impacts merging template -+functions in sprig. It is currently recommended to use v0.3.10 or later of that package. -+Using v0.3.9 will cause sprig tests to fail. - - ## Package Versions - -@@ -51,7 +50,7 @@ To load the Sprig `FuncMap`: - ```go - - import ( -- ""github.com/Masterminds/sprig"" -+ ""github.com/Masterminds/sprig/v3"" - ""html/template"" - ) - -diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml -deleted file mode 100644 -index d6460be411e57..0000000000000 ---- a/vendor/github.com/huandu/xstrings/.travis.yml -+++ /dev/null -@@ -1,7 +0,0 @@ --language: go --install: -- - go get golang.org/x/tools/cmd/cover -- - go get github.com/mattn/goveralls --script: -- - go test -v -covermode=count -coverprofile=coverage.out -- - 'if [ ""$TRAVIS_PULL_REQUEST"" = ""false"" ] && [ ! -z ""$COVERALLS_TOKEN"" ]; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi' -diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md -index 292bf2f39e13d..750c3c7eb69a7 100644 ---- a/vendor/github.com/huandu/xstrings/README.md -+++ b/vendor/github.com/huandu/xstrings/README.md -@@ -1,7 +1,7 @@ --# xstrings # -+# xstrings - --[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) --[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) -+[![Build Status](https://github.com/huandu/xstrings/workflows/Go/badge.svg)](https://github.com/huandu/xstrings/actions) -+[![Go Doc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://pkg.go.dev/github.com/huandu/xstrings) - [![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) - [![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) - -@@ -9,109 +9,109 @@ Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collect - - All functions are well tested and carefully tuned for performance. - --## Propose a new function ## -+## Propose a new function - - Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. - --## Install ## -+## Install - - Use `go get` to install this library. - - go get github.com/huandu/xstrings - --## API document ## -+## API document - - See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. - --## Function list ## -+## Function list - - Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. - - Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. 
- --### Package `xstrings` functions ### -- --*Keep this table sorted by Function in ascending order.* -- --| Function | Friends | # | --| -------- | ------- | --- | --| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | --| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | --| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | --| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | --| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | --| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | --| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | --| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | --| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | --| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | --| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | --| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | --| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | --| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | --| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | --| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | --| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | --| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | --| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | --| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | --| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | 
[#12](https://github.com/huandu/xstrings/issues/12) | --| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | --| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | --| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | --| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | --| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | --| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | --| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | -- --### Package `strings` functions ### -- --*Keep this table sorted by Function in ascending order.* -- --| Function | Friends | --| -------- | ------- | --| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | --| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | --| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | --| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | --| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | --| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | --| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | --| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | --| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | --| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | --| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | --| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | --| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | --| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | --| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | --| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | --| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | --| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | --| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | --| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | --| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | --| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | --| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | --| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | --| 
[SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | --| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | --| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | --| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | --| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | --| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | --| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | --| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | --| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | --| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | --| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | --| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | --| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | --| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | --| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | --| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | --| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | -- --## License ## -+### Package `xstrings` functions -+ -+_Keep this table sorted by Function in ascending order._ -+ -+| Function | Friends | # | -+| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------------- | -+| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | -+| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | -+| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | -+| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -+| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -+| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -+| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | -+| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | -+| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | -+| 
[Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | -+| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | -+| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | -+| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | -+| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | -+| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -+| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -+| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -+| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | -+| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | -+| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | -+| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | -+| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -+| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | -+| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -+| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | -+| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | -+| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | -+| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | -+ -+### Package `strings` functions -+ -+_Keep this table sorted by Function in ascending order._ -+ -+| Function | Friends | -+| --------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -+| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | -+| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | -+| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | -+| [Count](http://golang.org/pkg/strings/#Count) | 
`str.count` in Python; `substr_count` in PHP | -+| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | -+| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | -+| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | -+| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | -+| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | -+| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | -+| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | -+| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | -+| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | -+| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | -+| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | -+| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | -+| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | -+| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | -+| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | -+| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | -+| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | -+| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -+| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | -+| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | -+| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -+| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | -+| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | -+| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | -+| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | -+| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | -+| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | -+| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | -+| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -+| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | -+| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | -+| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | -+| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | -+| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | -+| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | -+| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -+| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | 
`String#chomp` in Ruby; `chomp` in Perl | -+ -+## License - - This library is licensed under MIT license. See LICENSE for details. -diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go -index 2aff57aab4d62..f427cc84e2ee2 100644 ---- a/vendor/github.com/huandu/xstrings/common.go -+++ b/vendor/github.com/huandu/xstrings/common.go -@@ -3,15 +3,11 @@ - - package xstrings - --import ( -- ""bytes"" --) -- - const bufferMaxInitGrowSize = 2048 - - // Lazy initialize a buffer. --func allocBuffer(orig, cur string) *bytes.Buffer { -- output := &bytes.Buffer{} -+func allocBuffer(orig, cur string) *stringBuilder { -+ output := &stringBuilder{} - maxSize := len(orig) * 4 - - // Avoid to reserve too much memory at once. -diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go -index 3d58fa81ae0e1..151c3151d9c8c 100644 ---- a/vendor/github.com/huandu/xstrings/convert.go -+++ b/vendor/github.com/huandu/xstrings/convert.go -@@ -4,7 +4,6 @@ - package xstrings - - import ( -- ""bytes"" - ""math/rand"" - ""unicode"" - ""unicode/utf8"" -@@ -23,7 +22,7 @@ func ToCamelCase(str string) string { - return """" - } - -- buf := &bytes.Buffer{} -+ buf := &stringBuilder{} - var r0, r1 rune - var size int - -@@ -112,7 +111,7 @@ func camelCaseToLowerCase(str string, connector rune) string { - return """" - } - -- buf := &bytes.Buffer{} -+ buf := &stringBuilder{} - wt, word, remaining := nextWord(str) - - for len(remaining) > 0 { -@@ -131,7 +130,7 @@ func camelCaseToLowerCase(str string, connector rune) string { - wt, word, remaining = nextWord(remaining) - } - -- if wt != invalidWord && wt != punctWord { -+ if wt != invalidWord && wt != punctWord && wt != connectorWord { - buf.WriteRune(connector) - } - -@@ -374,7 +373,7 @@ func nextValidRune(str string, prev rune) (r rune, size int) { - return - } - --func toLower(buf *bytes.Buffer, wt wordType, str string, connector rune) { -+func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { - buf.Grow(buf.Len() + len(str)) - - if wt != upperCaseWord && wt != connectorWord { -@@ -401,7 +400,7 @@ func SwapCase(str string) string { - var r rune - var size int - -- buf := &bytes.Buffer{} -+ buf := &stringBuilder{} - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) -@@ -435,7 +434,7 @@ func FirstRuneToUpper(str string) string { - return str - } - -- buf := &bytes.Buffer{} -+ buf := &stringBuilder{} - buf.WriteRune(unicode.ToUpper(r)) - buf.WriteString(str[size:]) - return buf.String() -@@ -453,7 +452,7 @@ func FirstRuneToLower(str string) string { - return str - } - -- buf := &bytes.Buffer{} -+ buf := &stringBuilder{} - buf.WriteRune(unicode.ToLower(r)) - buf.WriteString(str[size:]) - return buf.String() -@@ -566,7 +565,7 @@ func Successor(str string) string { - - // Needs to add one character for carry. - if i < 0 && carry != ' ' { -- buf := &bytes.Buffer{} -+ buf := &stringBuilder{} - buf.Grow(l + 4) // Reserve enough space for write. 
- - if lastAlphanumeric != 0 { -diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go -index 2d02df1c042f0..8cd76c525ccb7 100644 ---- a/vendor/github.com/huandu/xstrings/format.go -+++ b/vendor/github.com/huandu/xstrings/format.go -@@ -4,7 +4,6 @@ - package xstrings - - import ( -- ""bytes"" - ""unicode/utf8"" - ) - -@@ -28,7 +27,7 @@ func ExpandTabs(str string, tabSize int) string { - - var r rune - var i, size, column, expand int -- var output *bytes.Buffer -+ var output *stringBuilder - - orig := str - -@@ -43,7 +42,7 @@ func ExpandTabs(str string, tabSize int) string { - } - - for i = 0; i < expand; i++ { -- output.WriteByte(byte(' ')) -+ output.WriteRune(' ') - } - - column += expand -@@ -88,7 +87,7 @@ func LeftJustify(str string, length int, pad string) string { - remains := length - l - padLen := Len(pad) - -- output := &bytes.Buffer{} -+ output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - output.WriteString(str) - writePadString(output, pad, padLen, remains) -@@ -114,7 +113,7 @@ func RightJustify(str string, length int, pad string) string { - remains := length - l - padLen := Len(pad) - -- output := &bytes.Buffer{} -+ output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains) - output.WriteString(str) -@@ -140,7 +139,7 @@ func Center(str string, length int, pad string) string { - remains := length - l - padLen := Len(pad) - -- output := &bytes.Buffer{} -+ output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains/2) - output.WriteString(str) -@@ -148,7 +147,7 @@ func Center(str string, length int, pad string) string { - return output.String() - } - --func writePadString(output *bytes.Buffer, pad string, padLen, remains int) { -+func writePadString(output *stringBuilder, pad string, padLen, remains int) { - var r rune - var size int - -diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go -index 0eefb43ed71d2..64075f9bb8a72 100644 ---- a/vendor/github.com/huandu/xstrings/manipulate.go -+++ b/vendor/github.com/huandu/xstrings/manipulate.go -@@ -4,7 +4,6 @@ - package xstrings - - import ( -- ""bytes"" - ""strings"" - ""unicode/utf8"" - ) -@@ -131,7 +130,7 @@ func Insert(dst, src string, index int) string { - // Scrub scrubs invalid utf8 bytes with repl string. - // Adjacent invalid bytes are replaced only once. 
- func Scrub(str, repl string) string { -- var buf *bytes.Buffer -+ var buf *stringBuilder - var r rune - var size, pos int - var hasError bool -@@ -144,7 +143,7 @@ func Scrub(str, repl string) string { - if r == utf8.RuneError { - if !hasError { - if buf == nil { -- buf = &bytes.Buffer{} -+ buf = &stringBuilder{} - } - - buf.WriteString(origin[:pos]) -diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go -new file mode 100644 -index 0000000000000..bb0919d32f77d ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/stringbuilder.go -@@ -0,0 +1,7 @@ -+//+build go1.10 -+ -+package xstrings -+ -+import ""strings"" -+ -+type stringBuilder = strings.Builder -diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go -new file mode 100644 -index 0000000000000..dac389d139e7f ---- /dev/null -+++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go -@@ -0,0 +1,9 @@ -+//+build !go1.10 -+ -+package xstrings -+ -+import ""bytes"" -+ -+type stringBuilder struct { -+ bytes.Buffer -+} -diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go -index 66e23f86d030c..42e694fb17618 100644 ---- a/vendor/github.com/huandu/xstrings/translate.go -+++ b/vendor/github.com/huandu/xstrings/translate.go -@@ -4,7 +4,6 @@ - package xstrings - - import ( -- ""bytes"" - ""unicode"" - ""unicode/utf8"" - ) -@@ -152,12 +151,12 @@ func NewTranslator(from, to string) *Translator { - continue - } - -- fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) -+ _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) - fromEnd = utf8.RuneError - } - - if fromEnd != utf8.RuneError { -- singleRunes = tr.addRune(fromEnd, toStart, singleRunes) -+ tr.addRune(fromEnd, toStart, singleRunes) - } - - tr.reverted = reverted -@@ -303,7 +302,7 @@ func (tr *Translator) Translate(str string) string { - - orig := str - -- var output *bytes.Buffer -+ var output *stringBuilder - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) -@@ -500,7 +499,7 @@ func Squeeze(str, pattern string) string { - var size int - var skipSqueeze, matched bool - var tr *Translator -- var output *bytes.Buffer -+ var output *stringBuilder - - orig := str - last = -1 -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 735b1e8cd3187..3313dbfb51465 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -90,10 +90,10 @@ github.com/Azure/go-autorest/tracing - # github.com/Masterminds/goutils v1.1.1 - ## explicit - github.com/Masterminds/goutils --# github.com/Masterminds/semver/v3 v3.1.1 --## explicit; go 1.12 -+# github.com/Masterminds/semver/v3 v3.2.0 -+## explicit; go 1.18 - github.com/Masterminds/semver/v3 --# github.com/Masterminds/sprig/v3 v3.2.2 -+# github.com/Masterminds/sprig/v3 v3.2.3 - ## explicit; go 1.13 - github.com/Masterminds/sprig/v3 - # github.com/Microsoft/go-winio v0.5.1 -@@ -777,7 +777,7 @@ github.com/hashicorp/serf/coordinate - # github.com/heroku/x v0.0.50 - ## explicit; go 1.12 - github.com/heroku/x/logplex/encoding --# github.com/huandu/xstrings v1.3.1 -+# github.com/huandu/xstrings v1.3.3 - ## explicit; go 1.12 - github.com/huandu/xstrings - # github.com/imdario/mergo v0.3.12",unknown,"Bump github.com/Masterminds/sprig/v3 from 3.2.2 to 3.2.3 (#8159) - -Bumps -[github.com/Masterminds/sprig/v3](https://github.com/Masterminds/sprig) -from 3.2.2 to 3.2.3. -
-Release notes
-Sourced from github.com/Masterminds/sprig/v3's releases.
-v3.2.3
-Changed
-Changelog
-Sourced from github.com/Masterminds/sprig/v3's changelog.
-Release 3.2.3 (2022-11-29)
-Changed
-Commits
-- 581758e Updating the changelog for the 3.2.3 release
-- 5787448 Updating changelog for 3.2.2 release
-- 8489c3e Merge pull request #354 from mattfarina/bump-crypto-v0.3.0
-- 42ac6ac Updating crypto library
-- d65147b Merge pull request #353 from mattfarina/bump-semver-3.2.0
-- 92ac1ae Updating semver package
-- ce20d69 Merge pull request #313 from book987/master
-- f9a478a Merge pull request #334 from aJetHorn/patch-1
-- 58a4f65 Merge pull request #349 from mattfarina/bump-go-19
-- 32424cc Merge pull request #347 from neelayu/patch-1
-- Additional commits viewable in compare view
- - -[![Dependabot compatibility -score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/Masterminds/sprig/v3&package-manager=go_modules&previous-version=3.2.2&new-version=3.2.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) - -Dependabot will resolve any conflicts with this PR as long as you don't -alter it yourself. You can also trigger a rebase manually by commenting -`@dependabot rebase`. - -[//]: # (dependabot-automerge-start) -[//]: # (dependabot-automerge-end) - ---- - -
-Dependabot commands and options -
- -You can trigger Dependabot actions by commenting on this PR: -- `@dependabot rebase` will rebase this PR -- `@dependabot recreate` will recreate this PR, overwriting any edits -that have been made to it -- `@dependabot merge` will merge this PR after your CI passes on it -- `@dependabot squash and merge` will squash and merge this PR after -your CI passes on it -- `@dependabot cancel merge` will cancel a previously requested merge -and block automerging -- `@dependabot reopen` will reopen this PR if it is closed -- `@dependabot close` will close this PR and stop Dependabot recreating -it. You can achieve the same result by closing it manually -- `@dependabot ignore this major version` will close this PR and stop -Dependabot creating any more for this major version (unless you reopen -the PR or upgrade to it yourself) -- `@dependabot ignore this minor version` will close this PR and stop -Dependabot creating any more for this minor version (unless you reopen -the PR or upgrade to it yourself) -- `@dependabot ignore this dependency` will close this PR and stop -Dependabot creating any more for this dependency (unless you reopen the -PR or upgrade to it yourself) - - -
- -Signed-off-by: dependabot[bot] -Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>" -b51b7d7b55032b34fe8e10ffef87fa8cba073ade,2023-12-15 22:13:05,Andres Perez,"Helm: Use `/ingester/shutdown` endpoint in write pod for `preStop` hook (#11490) - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [x] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. -[Example -PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index 35893fde8f601..3ee05edfb42b2 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -49,6 +49,7 @@ - - ##### Changes - -+* [11490](https://github.com/grafana/loki/pull/11490) **andresperezl**: Helm: Use `/ingester/shutdown` for `preStop` hook in write pods. - * [10366](https://github.com/grafana/loki/pull/10366) **shantanualsi** Upgrade thanos objstore, dskit and other modules - * [10451](https://github.com/grafana/loki/pull/10451) **shantanualsi** Upgrade thanos `objstore` - * [10814](https://github.com/grafana/loki/pull/10814) **shantanualsi,kaviraj** Upgrade prometheus to v0.47.1 and dskit -diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md -index daadea68d3870..626523e1bae4d 100644 ---- a/production/helm/loki/CHANGELOG.md -+++ b/production/helm/loki/CHANGELOG.md -@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang - - [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) - -+## 5.41.4 -+ -+- [CHANGE] Use `/ingester/shutdown?terminate=false` for write `preStop` hook -+ - ## 5.41.3 - - - [FEATURE] Add support for defining an s3 backoff config. 
-diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index b68170b6b887b..095e2745a364a 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -3,7 +3,7 @@ name: loki - description: Helm chart for Grafana Loki in simple, scalable mode - type: application - appVersion: 2.9.3 --version: 5.41.3 -+version: 5.41.4 - home: https://grafana.github.io/helm-charts - sources: - - https://github.com/grafana/loki -diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md -index 7550d8fdcd51f..2857f553e13f7 100644 ---- a/production/helm/loki/README.md -+++ b/production/helm/loki/README.md -@@ -1,6 +1,6 @@ - # loki - --![Version: 5.41.3](https://img.shields.io/badge/Version-5.41.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) -+![Version: 5.41.4](https://img.shields.io/badge/Version-5.41.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) - - Helm chart for Grafana Loki in simple, scalable mode - -diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml -index 8c5e426d3ffd6..ca67038a16192 100644 ---- a/production/helm/loki/templates/write/statefulset-write.yaml -+++ b/production/helm/loki/templates/write/statefulset-write.yaml -@@ -119,7 +119,7 @@ spec: - lifecycle: - preStop: - httpGet: -- path: ""/ingester/flush_shutdown"" -+ path: ""/ingester/shutdown?terminate=false"" - port: http-metrics - {{- end }} - volumeMounts:",Helm,"Use `/ingester/shutdown` endpoint in write pod for `preStop` hook (#11490) - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [x] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. 
-[Example -PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)" -ad322c0fc22bbb99128001b81ebb384bd778066c,2024-12-03 06:58:55,Ashwanth,feat(block-scheduler): adds service and basic planner support for scheduler (#15200),False,"diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md -index 6c8ed01c5c0c7..18c4cdceb649e 100644 ---- a/docs/sources/shared/configuration.md -+++ b/docs/sources/shared/configuration.md -@@ -188,6 +188,26 @@ block_builder: - # CLI flag: -blockbuilder.backoff..backoff-retries - [max_retries: | default = 10] - -+block_scheduler: -+ # Consumer group used by block scheduler to track the last consumed offset. -+ # CLI flag: -block-scheduler.consumer-group -+ [consumer_group: | default = ""block-scheduler""] -+ -+ # How often the scheduler should plan jobs. -+ # CLI flag: -block-scheduler.interval -+ [interval: | default = 5m] -+ -+ # Period used by the planner to calculate the start and end offset such that -+ # each job consumes records spanning the target period. -+ # CLI flag: -block-scheduler.target-records-spanning-period -+ [target_records_spanning_period: | default = 1h] -+ -+ # Lookback period in milliseconds used by the scheduler to plan jobs when the -+ # consumer group has no commits. -1 consumes from the latest offset. -2 -+ # consumes from the start of the partition. -+ # CLI flag: -block-scheduler.lookback-period -+ [lookback_period: | default = -2] -+ - pattern_ingester: - # Whether the pattern ingester is enabled. - # CLI flag: -pattern-ingester.enabled -diff --git a/pkg/blockbuilder/scheduler/kafkautil.go b/pkg/blockbuilder/scheduler/kafkautil.go -new file mode 100644 -index 0000000000000..f746f2a9fd4e0 ---- /dev/null -+++ b/pkg/blockbuilder/scheduler/kafkautil.go -@@ -0,0 +1,80 @@ -+// SPDX-License-Identifier: AGPL-3.0-only -+ -+package scheduler -+ -+import ( -+ ""context"" -+ ""errors"" -+ ""fmt"" -+ ""sync"" -+ -+ ""github.com/twmb/franz-go/pkg/kadm"" -+ ""github.com/twmb/franz-go/pkg/kerr"" -+) -+ -+// GetGroupLag is similar to `kadm.Client.Lag` but works when the group doesn't have live participants. -+// Similar to `kadm.CalculateGroupLagWithStartOffsets`, it takes into account that the group may not have any commits. -+// -+// The lag is the difference between the last produced offset (high watermark) and an offset in the ""past"". -+// If the block builder committed an offset for a given partition to the consumer group at least once, then -+// the lag is the difference between the last produced offset and the offset committed in the consumer group. -+// Otherwise, if the block builder didn't commit an offset for a given partition yet (e.g. block builder is -+// running for the first time), then the lag is the difference between the last produced offset and fallbackOffsetMillis. 
-+func GetGroupLag(ctx context.Context, admClient *kadm.Client, topic, group string, fallbackOffsetMillis int64) (kadm.GroupLag, error) { -+ offsets, err := admClient.FetchOffsets(ctx, group) -+ if err != nil { -+ if !errors.Is(err, kerr.GroupIDNotFound) { -+ return nil, fmt.Errorf(""fetch offsets: %w"", err) -+ } -+ } -+ if err := offsets.Error(); err != nil { -+ return nil, fmt.Errorf(""fetch offsets got error in response: %w"", err) -+ } -+ -+ startOffsets, err := admClient.ListStartOffsets(ctx, topic) -+ if err != nil { -+ return nil, err -+ } -+ endOffsets, err := admClient.ListEndOffsets(ctx, topic) -+ if err != nil { -+ return nil, err -+ } -+ -+ resolveFallbackOffsets := sync.OnceValues(func() (kadm.ListedOffsets, error) { -+ return admClient.ListOffsetsAfterMilli(ctx, fallbackOffsetMillis, topic) -+ }) -+ // If the group-partition in offsets doesn't have a commit, fall back depending on where fallbackOffsetMillis points at. -+ for topic, pt := range startOffsets.Offsets() { -+ for partition, startOffset := range pt { -+ if _, ok := offsets.Lookup(topic, partition); ok { -+ continue -+ } -+ fallbackOffsets, err := resolveFallbackOffsets() -+ if err != nil { -+ return nil, fmt.Errorf(""resolve fallback offsets: %w"", err) -+ } -+ o, ok := fallbackOffsets.Lookup(topic, partition) -+ if !ok { -+ return nil, fmt.Errorf(""partition %d not found in fallback offsets for topic %s"", partition, topic) -+ } -+ if o.Offset < startOffset.At { -+ // Skip the resolved fallback offset if it's before the partition's start offset (i.e. before the earliest offset of the partition). -+ // This should not happen in Kafka, but can happen in Kafka-compatible systems, e.g. Warpstream. -+ continue -+ } -+ offsets.Add(kadm.OffsetResponse{Offset: kadm.Offset{ -+ Topic: o.Topic, -+ Partition: o.Partition, -+ At: o.Offset, -+ LeaderEpoch: o.LeaderEpoch, -+ }}) -+ } -+ } -+ -+ descrGroup := kadm.DescribedGroup{ -+ // ""Empty"" is the state that indicates that the group doesn't have active consumer members; this is always the case for block-builder, -+ // because we don't use group consumption. -+ State: ""Empty"", -+ } -+ return kadm.CalculateGroupLagWithStartOffsets(descrGroup, offsets, startOffsets, endOffsets), nil -+} -diff --git a/pkg/blockbuilder/scheduler/kafkautil_test.go b/pkg/blockbuilder/scheduler/kafkautil_test.go -new file mode 100644 -index 0000000000000..d2a865702a808 ---- /dev/null -+++ b/pkg/blockbuilder/scheduler/kafkautil_test.go -@@ -0,0 +1,164 @@ -+// SPDX-License-Identifier: AGPL-3.0-only -+ -+package scheduler -+ -+import ( -+ ""context"" -+ ""errors"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/stretchr/testify/require"" -+ ""github.com/twmb/franz-go/pkg/kadm"" -+ ""github.com/twmb/franz-go/pkg/kgo"" -+ -+ ""github.com/grafana/loki/v3/pkg/kafka/testkafka"" -+) -+ -+const ( -+ testTopic = ""test"" -+ testGroup = ""testgroup"" -+) -+ -+func TestKafkaGetGroupLag(t *testing.T) { -+ ctx, cancel := context.WithCancelCause(context.Background()) -+ t.Cleanup(func() { cancel(errors.New(""test done"")) }) -+ -+ _, addr := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, 3, testTopic) -+ kafkaClient := mustKafkaClient(t, addr) -+ admClient := kadm.NewClient(kafkaClient) -+ -+ const numRecords = 5 -+ -+ var producedRecords []kgo.Record -+ kafkaTime := time.Now().Add(-12 * time.Hour) -+ for i := int64(0); i < numRecords; i++ { -+ kafkaTime = kafkaTime.Add(time.Minute) -+ -+ // Produce and keep records to partition 0. 
-+ res := produceRecords(ctx, t, kafkaClient, kafkaTime, ""1"", testTopic, 0, []byte(`test value`)) -+ rec, err := res.First() -+ require.NoError(t, err) -+ require.NotNil(t, rec) -+ -+ producedRecords = append(producedRecords, *rec) -+ -+ // Produce same records to partition 1 (this partition won't have any commits). -+ produceRecords(ctx, t, kafkaClient, kafkaTime, ""1"", testTopic, 1, []byte(`test value`)) -+ } -+ require.Len(t, producedRecords, numRecords) -+ -+ // Commit last produced record from partition 0. -+ rec := producedRecords[len(producedRecords)-1] -+ offsets := make(kadm.Offsets) -+ offsets.Add(kadm.Offset{ -+ Topic: rec.Topic, -+ Partition: rec.Partition, -+ At: rec.Offset + 1, -+ LeaderEpoch: rec.LeaderEpoch, -+ }) -+ err := admClient.CommitAllOffsets(ctx, testGroup, offsets) -+ require.NoError(t, err) -+ -+ // Truncate partition 1 after second to last record to emulate the retention -+ // Note Kafka sets partition's start offset to the requested offset. Any records within the segment before the requested offset can no longer be read. -+ // Note the difference between DeleteRecords and DeleteOffsets in kadm docs. -+ deleteRecOffsets := make(kadm.Offsets) -+ deleteRecOffsets.Add(kadm.Offset{ -+ Topic: testTopic, -+ Partition: 1, -+ At: numRecords - 2, -+ }) -+ _, err = admClient.DeleteRecords(ctx, deleteRecOffsets) -+ require.NoError(t, err) -+ -+ getTopicPartitionLag := func(t *testing.T, lag kadm.GroupLag, topic string, part int32) int64 { -+ l, ok := lag.Lookup(topic, part) -+ require.True(t, ok) -+ return l.Lag -+ } -+ -+ t.Run(""fallbackOffset=milliseconds"", func(t *testing.T) { -+ // get the timestamp of the last produced record -+ rec := producedRecords[len(producedRecords)-1] -+ fallbackOffset := rec.Timestamp.Add(-time.Millisecond).UnixMilli() -+ groupLag, err := GetGroupLag(ctx, admClient, testTopic, testGroup, fallbackOffset) -+ require.NoError(t, err) -+ -+ require.EqualValues(t, 0, getTopicPartitionLag(t, groupLag, testTopic, 0), ""partition 0 must have no lag"") -+ require.EqualValues(t, 1, getTopicPartitionLag(t, groupLag, testTopic, 1), ""partition 1 must fall back to known record and get its lag from there"") -+ require.EqualValues(t, 0, getTopicPartitionLag(t, groupLag, testTopic, 2), ""partition 2 has no data and must have no lag"") -+ }) -+ -+ t.Run(""fallbackOffset=before-earliest"", func(t *testing.T) { -+ // get the timestamp of third to last produced record (record before earliest in partition 1) -+ rec := producedRecords[len(producedRecords)-3] -+ fallbackOffset := rec.Timestamp.Add(-time.Millisecond).UnixMilli() -+ groupLag, err := GetGroupLag(ctx, admClient, testTopic, testGroup, fallbackOffset) -+ require.NoError(t, err) -+ -+ require.EqualValues(t, 0, getTopicPartitionLag(t, groupLag, testTopic, 0), ""partition 0 must have no lag"") -+ require.EqualValues(t, 2, getTopicPartitionLag(t, groupLag, testTopic, 1), ""partition 1 must fall back to earliest and get its lag from there"") -+ require.EqualValues(t, 0, getTopicPartitionLag(t, groupLag, testTopic, 2), ""partition 2 has no data and must have no lag"") -+ }) -+ -+ t.Run(""fallbackOffset=0"", func(t *testing.T) { -+ groupLag, err := GetGroupLag(ctx, admClient, testTopic, testGroup, 0) -+ require.NoError(t, err) -+ -+ require.EqualValues(t, 0, getTopicPartitionLag(t, groupLag, testTopic, 0), ""partition 0 must have no lag"") -+ require.EqualValues(t, 2, getTopicPartitionLag(t, groupLag, testTopic, 1), ""partition 1 must fall back to the earliest and get its lag from there"") -+ 
require.EqualValues(t, 0, getTopicPartitionLag(t, groupLag, testTopic, 2), ""partition 2 has no data and must have no lag"") -+ }) -+ -+ t.Run(""group=unknown"", func(t *testing.T) { -+ groupLag, err := GetGroupLag(ctx, admClient, testTopic, ""unknown"", 0) -+ require.NoError(t, err) -+ -+ // This group doesn't have any commits, so it must calc its lag from the fallback. -+ require.EqualValues(t, numRecords, getTopicPartitionLag(t, groupLag, testTopic, 0)) -+ require.EqualValues(t, 2, getTopicPartitionLag(t, groupLag, testTopic, 1), ""partition 1 must fall back to the earliest and get its lag from there"") -+ require.EqualValues(t, 0, getTopicPartitionLag(t, groupLag, testTopic, 2), ""partition 2 has no data and must have no lag"") -+ }) -+} -+ -+func mustKafkaClient(t *testing.T, addrs ...string) *kgo.Client { -+ writeClient, err := kgo.NewClient( -+ kgo.SeedBrokers(addrs...), -+ kgo.AllowAutoTopicCreation(), -+ // We will choose the partition of each record. -+ kgo.RecordPartitioner(kgo.ManualPartitioner()), -+ ) -+ require.NoError(t, err) -+ t.Cleanup(writeClient.Close) -+ return writeClient -+} -+ -+func produceRecords( -+ ctx context.Context, -+ t *testing.T, -+ kafkaClient *kgo.Client, -+ ts time.Time, -+ userID string, -+ topic string, -+ part int32, -+ val []byte, -+) kgo.ProduceResults { -+ rec := &kgo.Record{ -+ Timestamp: ts, -+ Key: []byte(userID), -+ Value: val, -+ Topic: topic, -+ Partition: part, // samples in this batch are split between N partitions -+ } -+ produceResult := kafkaClient.ProduceSync(ctx, rec) -+ require.NoError(t, produceResult.FirstErr()) -+ return produceResult -+} -+ -+func commitOffset(ctx context.Context, t *testing.T, kafkaClient *kgo.Client, group string, offset kadm.Offset) { -+ offsets := make(kadm.Offsets) -+ offsets.Add(offset) -+ err := kadm.NewClient(kafkaClient).CommitAllOffsets(ctx, group, offsets) -+ require.NoError(t, err) -+} -diff --git a/pkg/blockbuilder/scheduler/metrics.go b/pkg/blockbuilder/scheduler/metrics.go -new file mode 100644 -index 0000000000000..4e1dbfa2afa1c ---- /dev/null -+++ b/pkg/blockbuilder/scheduler/metrics.go -@@ -0,0 +1,24 @@ -+package scheduler -+ -+import ( -+ ""github.com/prometheus/client_golang/prometheus"" -+ ""github.com/prometheus/client_golang/prometheus/promauto"" -+) -+ -+type Metrics struct { -+ lag *prometheus.GaugeVec -+ committedOffset *prometheus.GaugeVec -+} -+ -+func NewMetrics(reg prometheus.Registerer) *Metrics { -+ return &Metrics{ -+ lag: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ -+ Name: ""loki_block_scheduler_group_lag"", -+ Help: ""How far behind the block scheduler consumer group is from the latest offset."", -+ }, []string{""partition""}), -+ committedOffset: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ -+ Name: ""loki_block_scheduler_group_committed_offset"", -+ Help: ""The current offset the block scheduler consumer group is at."", -+ }, []string{""partition""}), -+ } -+} -diff --git a/pkg/blockbuilder/scheduler/offsets_reader.go b/pkg/blockbuilder/scheduler/offsets_reader.go -new file mode 100644 -index 0000000000000..742185dba817f ---- /dev/null -+++ b/pkg/blockbuilder/scheduler/offsets_reader.go -@@ -0,0 +1,62 @@ -+package scheduler -+ -+import ( -+ ""context"" -+ ""errors"" -+ ""time"" -+ -+ ""github.com/twmb/franz-go/pkg/kadm"" -+ ""github.com/twmb/franz-go/pkg/kgo"" -+) -+ -+type offsetReader struct { -+ topic string -+ consumerGroup string -+ fallbackOffsetMillis int64 -+ -+ adminClient *kadm.Client -+} -+ -+func NewOffsetReader(topic, consumerGroup string, 
lookbackPeriodInMs int64, client *kgo.Client) OffsetReader { -+ var fallbackOffsetMillis int64 -+ if lookbackPeriodInMs >= 0 { -+ fallbackOffsetMillis = time.Now().UnixMilli() - lookbackPeriodInMs -+ } else { -+ fallbackOffsetMillis = lookbackPeriodInMs -+ } -+ -+ return &offsetReader{ -+ topic: topic, -+ consumerGroup: consumerGroup, -+ adminClient: kadm.NewClient(client), -+ fallbackOffsetMillis: fallbackOffsetMillis, -+ } -+} -+ -+func (r *offsetReader) GroupLag(ctx context.Context) (map[int32]kadm.GroupMemberLag, error) { -+ lag, err := GetGroupLag(ctx, r.adminClient, r.topic, r.consumerGroup, r.fallbackOffsetMillis) -+ if err != nil { -+ return nil, err -+ } -+ -+ offsets, ok := lag[r.topic] -+ if !ok { -+ return nil, errors.New(""no lag found for the topic"") -+ } -+ -+ return offsets, nil -+} -+ -+func (r *offsetReader) ListOffsetsAfterMilli(ctx context.Context, ts int64) (map[int32]kadm.ListedOffset, error) { -+ offsets, err := r.adminClient.ListOffsetsAfterMilli(ctx, ts, r.topic) -+ if err != nil { -+ return nil, err -+ } -+ -+ resp, ok := offsets[r.topic] -+ if !ok { -+ return nil, errors.New(""no offsets found for the topic"") -+ } -+ -+ return resp, nil -+} -diff --git a/pkg/blockbuilder/scheduler/queue.go b/pkg/blockbuilder/scheduler/queue.go -index 3e9cf087c6792..e2f125ad70a07 100644 ---- a/pkg/blockbuilder/scheduler/queue.go -+++ b/pkg/blockbuilder/scheduler/queue.go -@@ -30,6 +30,25 @@ func NewJobQueue() *JobQueue { - } - } - -+func (q *JobQueue) Exists(job *types.Job) (types.JobStatus, bool) { -+ q.mu.RLock() -+ defer q.mu.RUnlock() -+ -+ if _, ok := q.inProgress[job.ID]; ok { -+ return types.JobStatusInProgress, true -+ } -+ -+ if _, ok := q.pending[job.ID]; ok { -+ return types.JobStatusPending, true -+ } -+ -+ if _, ok := q.completed[job.ID]; ok { -+ return types.JobStatusComplete, true -+ } -+ -+ return -1, false -+} -+ - // Enqueue adds a new job to the pending queue - // This is a naive implementation, intended to be refactored - func (q *JobQueue) Enqueue(job *types.Job) error { -diff --git a/pkg/blockbuilder/scheduler/scheduler.go b/pkg/blockbuilder/scheduler/scheduler.go -index 274713b5b1c36..dbf732742de39 100644 ---- a/pkg/blockbuilder/scheduler/scheduler.go -+++ b/pkg/blockbuilder/scheduler/scheduler.go -@@ -2,43 +2,140 @@ package scheduler - - import ( - ""context"" -+ ""errors"" -+ ""flag"" -+ ""strconv"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/go-kit/log/level"" -+ ""github.com/grafana/dskit/services"" -+ ""github.com/prometheus/client_golang/prometheus"" -+ ""github.com/twmb/franz-go/pkg/kadm"" - - ""github.com/grafana/loki/v3/pkg/blockbuilder/types"" - ) - - var ( - _ types.Scheduler = unimplementedScheduler{} -- _ types.Scheduler = &QueueScheduler{} -+ _ types.Scheduler = &BlockScheduler{} - ) - --// unimplementedScheduler provides default implementations that panic. 
--type unimplementedScheduler struct{} -+type Config struct { -+ ConsumerGroup string `yaml:""consumer_group""` -+ Interval time.Duration `yaml:""interval""` -+ TargetRecordConsumptionPeriod time.Duration `yaml:""target_records_spanning_period""` -+ LookbackPeriod int64 `yaml:""lookback_period""` -+} - --func (s unimplementedScheduler) HandleGetJob(_ context.Context, _ string) (*types.Job, bool, error) { -- panic(""unimplemented"") -+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { -+ f.DurationVar(&cfg.Interval, prefix+""interval"", 5*time.Minute, ""How often the scheduler should plan jobs."") -+ f.DurationVar(&cfg.TargetRecordConsumptionPeriod, prefix+""target-records-spanning-period"", time.Hour, ""Period used by the planner to calculate the start and end offset such that each job consumes records spanning the target period."") -+ f.StringVar(&cfg.ConsumerGroup, prefix+""consumer-group"", ""block-scheduler"", ""Consumer group used by block scheduler to track the last consumed offset."") -+ f.Int64Var(&cfg.LookbackPeriod, prefix+""lookback-period"", -2, ""Lookback period in milliseconds used by the scheduler to plan jobs when the consumer group has no commits. -1 consumes from the latest offset. -2 consumes from the start of the partition."") - } - --func (s unimplementedScheduler) HandleCompleteJob(_ context.Context, _ string, _ *types.Job) error { -- panic(""unimplemented"") -+func (cfg *Config) RegisterFlags(f *flag.FlagSet) { -+ cfg.RegisterFlagsWithPrefix(""block-scheduler."", f) - } - --func (s unimplementedScheduler) HandleSyncJob(_ context.Context, _ string, _ *types.Job) error { -- panic(""unimplemented"") -+func (cfg *Config) Validate() error { -+ if cfg.Interval <= 0 { -+ return errors.New(""interval must be a non-zero value"") -+ } -+ -+ if cfg.LookbackPeriod < -2 { -+ return errors.New(""only -1(latest) and -2(earliest) are valid as negative values for lookback_period"") -+ } -+ -+ return nil - } - --// QueueScheduler implements the Scheduler interface --type QueueScheduler struct { -- queue *JobQueue -+// BlockScheduler implements the Scheduler interface -+type BlockScheduler struct { -+ services.Service -+ -+ cfg Config -+ logger log.Logger -+ queue *JobQueue -+ metrics *Metrics -+ -+ offsetReader OffsetReader -+ planner Planner - } - - // NewScheduler creates a new scheduler instance --func NewScheduler(queue *JobQueue) *QueueScheduler { -- return &QueueScheduler{ -- queue: queue, -+func NewScheduler(cfg Config, queue *JobQueue, offsetReader OffsetReader, logger log.Logger, r prometheus.Registerer) *BlockScheduler { -+ planner := NewTimeRangePlanner(cfg.TargetRecordConsumptionPeriod, offsetReader, func() time.Time { return time.Now().UTC() }, logger) -+ s := &BlockScheduler{ -+ cfg: cfg, -+ planner: planner, -+ offsetReader: offsetReader, -+ logger: logger, -+ metrics: NewMetrics(r), -+ queue: queue, -+ } -+ s.Service = services.NewBasicService(nil, s.running, nil) -+ return s -+} -+ -+func (s *BlockScheduler) running(ctx context.Context) error { -+ if err := s.runOnce(ctx); err != nil { -+ level.Error(s.logger).Log(""msg"", ""failed to schedule jobs"", ""err"", err) -+ } -+ -+ ticker := time.NewTicker(s.cfg.Interval) -+ for { -+ select { -+ case <-ticker.C: -+ if err := s.runOnce(ctx); err != nil { -+ // TODO: add metrics -+ level.Error(s.logger).Log(""msg"", ""failed to schedule jobs"", ""err"", err) -+ } -+ case <-ctx.Done(): -+ return nil -+ } - } - } - --func (s *QueueScheduler) HandleGetJob(ctx context.Context, builderID string) 
(*types.Job, bool, error) { -+func (s *BlockScheduler) runOnce(ctx context.Context) error { -+ lag, err := s.offsetReader.GroupLag(ctx) -+ if err != nil { -+ level.Error(s.logger).Log(""msg"", ""failed to get group lag"", ""err"", err) -+ return err -+ } -+ -+ s.publishLagMetrics(lag) -+ -+ jobs, err := s.planner.Plan(ctx) -+ if err != nil { -+ level.Error(s.logger).Log(""msg"", ""failed to plan jobs"", ""err"", err) -+ } -+ -+ for _, job := range jobs { -+ // TODO: end offset keeps moving each time we plan jobs, maybe we should not use it as part of the job ID -+ if status, ok := s.queue.Exists(&job); ok { -+ level.Debug(s.logger).Log(""msg"", ""job already exists"", ""job"", job, ""status"", status) -+ continue -+ } -+ -+ if err := s.queue.Enqueue(&job); err != nil { -+ level.Error(s.logger).Log(""msg"", ""failed to enqueue job"", ""job"", job, ""err"", err) -+ } -+ } -+ -+ return nil -+} -+ -+func (s *BlockScheduler) publishLagMetrics(lag map[int32]kadm.GroupMemberLag) { -+ for partition, offsets := range lag { -+ // useful for scaling builders -+ s.metrics.lag.WithLabelValues(strconv.Itoa(int(partition))).Set(float64(offsets.Lag)) -+ s.metrics.committedOffset.WithLabelValues(strconv.Itoa(int(partition))).Set(float64(offsets.Commit.At)) -+ } -+} -+ -+func (s *BlockScheduler) HandleGetJob(ctx context.Context, builderID string) (*types.Job, bool, error) { - select { - case <-ctx.Done(): - return nil, false, ctx.Err() -@@ -47,10 +144,26 @@ func (s *QueueScheduler) HandleGetJob(ctx context.Context, builderID string) (*t - } - } - --func (s *QueueScheduler) HandleCompleteJob(_ context.Context, builderID string, job *types.Job) error { -+func (s *BlockScheduler) HandleCompleteJob(_ context.Context, builderID string, job *types.Job) error { -+ // TODO: handle commits - return s.queue.MarkComplete(job.ID, builderID) - } - --func (s *QueueScheduler) HandleSyncJob(_ context.Context, builderID string, job *types.Job) error { -+func (s *BlockScheduler) HandleSyncJob(_ context.Context, builderID string, job *types.Job) error { - return s.queue.SyncJob(job.ID, builderID, job) - } -+ -+// unimplementedScheduler provides default implementations that panic. 
-+type unimplementedScheduler struct{} -+ -+func (s unimplementedScheduler) HandleGetJob(_ context.Context, _ string) (*types.Job, bool, error) { -+ panic(""unimplemented"") -+} -+ -+func (s unimplementedScheduler) HandleCompleteJob(_ context.Context, _ string, _ *types.Job) error { -+ panic(""unimplemented"") -+} -+ -+func (s unimplementedScheduler) HandleSyncJob(_ context.Context, _ string, _ *types.Job) error { -+ panic(""unimplemented"") -+} -diff --git a/pkg/blockbuilder/scheduler/scheduler_test.go b/pkg/blockbuilder/scheduler/scheduler_test.go -index ad6829bc8fe69..bd9e00450dfa7 100644 ---- a/pkg/blockbuilder/scheduler/scheduler_test.go -+++ b/pkg/blockbuilder/scheduler/scheduler_test.go -@@ -5,20 +5,23 @@ import ( - ""testing"" - ""time"" - -+ ""github.com/go-kit/log"" -+ ""github.com/prometheus/client_golang/prometheus"" -+ - ""github.com/grafana/loki/v3/pkg/blockbuilder/builder"" - ""github.com/grafana/loki/v3/pkg/blockbuilder/types"" - ) - - type testEnv struct { - queue *JobQueue -- scheduler *QueueScheduler -+ scheduler *BlockScheduler - transport *builder.MemoryTransport - builder *builder.Worker - } - - func newTestEnv(builderID string) *testEnv { - queue := NewJobQueue() -- scheduler := NewScheduler(queue) -+ scheduler := NewScheduler(Config{}, queue, nil, log.NewNopLogger(), prometheus.NewRegistry()) - transport := builder.NewMemoryTransport(scheduler) - builder := builder.NewWorker(builderID, builder.NewMemoryTransport(scheduler)) - -diff --git a/pkg/blockbuilder/scheduler/strategy.go b/pkg/blockbuilder/scheduler/strategy.go -new file mode 100644 -index 0000000000000..5ea1fb6db2d9c ---- /dev/null -+++ b/pkg/blockbuilder/scheduler/strategy.go -@@ -0,0 +1,142 @@ -+package scheduler -+ -+import ( -+ ""context"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/go-kit/log/level"" -+ ""github.com/twmb/franz-go/pkg/kadm"" -+ -+ ""github.com/grafana/loki/v3/pkg/blockbuilder/types"" -+) -+ -+// OffsetReader is an interface to list offsets for all partitions of a topic from Kafka. -+type OffsetReader interface { -+ ListOffsetsAfterMilli(context.Context, int64) (map[int32]kadm.ListedOffset, error) -+ GroupLag(context.Context) (map[int32]kadm.GroupMemberLag, error) -+} -+ -+type Planner interface { -+ Name() string -+ Plan(ctx context.Context) ([]types.Job, error) -+} -+ -+const ( -+ RecordCountStrategy = ""record_count"" -+ TimeRangeStrategy = ""time_range"" -+) -+ -+// tries to consume upto targetRecordCount records per partition -+type RecordCountPlanner struct { -+ targetRecordCount int64 -+ offsetReader OffsetReader -+ logger log.Logger -+} -+ -+func NewRecordCountPlanner(targetRecordCount int64) *RecordCountPlanner { -+ return &RecordCountPlanner{ -+ targetRecordCount: targetRecordCount, -+ } -+} -+ -+func (p *RecordCountPlanner) Name() string { -+ return RecordCountStrategy -+} -+ -+func (p *RecordCountPlanner) Plan(ctx context.Context) ([]types.Job, error) { -+ offsets, err := p.offsetReader.GroupLag(ctx) -+ if err != nil { -+ level.Error(p.logger).Log(""msg"", ""failed to get group lag"", ""err"", err) -+ return nil, err -+ } -+ -+ jobs := make([]types.Job, 0, len(offsets)) -+ for _, partition := range offsets { -+ // kadm.GroupMemberLag contains valid Commit.At even when consumer group never committed any offset. 
-+ // no additional validation is needed here -+ startOffset := partition.Commit.At + 1 -+ endOffset := min(startOffset+p.targetRecordCount, partition.End.Offset) -+ -+ job := types.Job{ -+ Partition: int(partition.Partition), -+ Offsets: types.Offsets{ -+ Min: startOffset, -+ Max: endOffset, -+ }, -+ } -+ -+ jobs = append(jobs, job) -+ } -+ -+ return jobs, nil -+} -+ -+// Targets consuming records spanning a configured period. -+// This is a stateless planner, it is upto the caller to deduplicate or update jobs that are already in queue or progress. -+type TimeRangePlanner struct { -+ offsetReader OffsetReader -+ -+ buffer time.Duration -+ targetPeriod time.Duration -+ now func() time.Time -+ -+ logger log.Logger -+} -+ -+func NewTimeRangePlanner(interval time.Duration, offsetReader OffsetReader, now func() time.Time, logger log.Logger) *TimeRangePlanner { -+ return &TimeRangePlanner{ -+ targetPeriod: interval, -+ buffer: interval, -+ offsetReader: offsetReader, -+ now: now, -+ logger: logger, -+ } -+} -+ -+func (p *TimeRangePlanner) Name() string { -+ return TimeRangeStrategy -+} -+ -+func (p *TimeRangePlanner) Plan(ctx context.Context) ([]types.Job, error) { -+ // truncate to the nearest Interval -+ consumeUptoTS := p.now().Add(-p.buffer).Truncate(p.targetPeriod) -+ -+ // this will return the latest offset in the partition if no records are produced after this ts. -+ consumeUptoOffsets, err := p.offsetReader.ListOffsetsAfterMilli(ctx, consumeUptoTS.UnixMilli()) -+ if err != nil { -+ level.Error(p.logger).Log(""msg"", ""failed to list offsets after timestamp"", ""err"", err) -+ return nil, err -+ } -+ -+ offsets, err := p.offsetReader.GroupLag(ctx) -+ if err != nil { -+ level.Error(p.logger).Log(""msg"", ""failed to get group lag"", ""err"", err) -+ return nil, err -+ } -+ -+ var jobs []types.Job -+ for _, partitionOffset := range offsets { -+ startOffset := partitionOffset.Commit.At + 1 -+ // TODO: we could further break down the work into Interval sized chunks if this partition has pending records spanning a long time range -+ // or have the builder consume in chunks and commit the job status back to scheduler. -+ endOffset := consumeUptoOffsets[partitionOffset.Partition].Offset -+ -+ if startOffset >= endOffset { -+ level.Info(p.logger).Log(""msg"", ""no pending records to process"", ""partition"", partitionOffset.Partition, -+ ""commitOffset"", partitionOffset.Commit.At, -+ ""consumeUptoOffset"", consumeUptoOffsets[partitionOffset.Partition].Offset) -+ continue -+ } -+ -+ jobs = append(jobs, types.Job{ -+ Partition: int(partitionOffset.Partition), -+ Offsets: types.Offsets{ -+ Min: startOffset, -+ Max: endOffset, -+ }, -+ }) -+ } -+ -+ return jobs, nil -+} -diff --git a/pkg/blockbuilder/scheduler/strategy_test.go b/pkg/blockbuilder/scheduler/strategy_test.go -new file mode 100644 -index 0000000000000..eb4704f268c74 ---- /dev/null -+++ b/pkg/blockbuilder/scheduler/strategy_test.go -@@ -0,0 +1,159 @@ -+package scheduler -+ -+import ( -+ ""context"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/stretchr/testify/require"" -+ ""github.com/twmb/franz-go/pkg/kadm"" -+ -+ ""github.com/grafana/loki/v3/pkg/blockbuilder/types"" -+) -+ -+func TestTimeRangePlanner_Plan(t *testing.T) { -+ interval := 15 * time.Minute -+ for _, tc := range []struct { -+ name string -+ now time.Time -+ expectedJobs []types.Job -+ groupLag map[int32]kadm.GroupMemberLag -+ consumeUpto map[int32]kadm.ListedOffset -+ }{ -+ { -+ // Interval 1 -+ // now: 00:42:00. 
consume until 00:15:00 -+ // last consumed offset 100 with record ts: 00:10:00 -+ // record offset with ts after 00:15:00 - offset 200 -+ // resulting jobs: [100, 200] -+ name: ""normal case. schedule first interval"", -+ now: time.Date(0, 0, 0, 0, 42, 0, 0, time.UTC), // 00:42:00 -+ groupLag: map[int32]kadm.GroupMemberLag{ -+ 0: { -+ Commit: kadm.Offset{ -+ At: 100, -+ }, -+ Partition: 0, -+ }, -+ }, -+ consumeUpto: map[int32]kadm.ListedOffset{ -+ 0: { -+ Offset: 200, -+ }, -+ }, -+ expectedJobs: []types.Job{ -+ { -+ Partition: 0, -+ Offsets: types.Offsets{Min: 101, Max: 200}, -+ }, -+ }, -+ }, -+ { -+ // Interval 2 -+ // now: 00:46:00. consume until 00:30:00 -+ // last consumed offset 199 with record ts: 00:11:00 -+ // record offset with ts after 00:30:00 - offset 300 -+ // resulting jobs: [200, 300] -+ name: ""normal case. schedule second interval"", -+ now: time.Date(0, 0, 0, 0, 46, 0, 0, time.UTC), // 00:46:00 -+ groupLag: map[int32]kadm.GroupMemberLag{ -+ 0: { -+ Commit: kadm.Offset{ -+ At: 199, -+ }, -+ Partition: 0, -+ }, -+ 1: { -+ Commit: kadm.Offset{ -+ At: 11, -+ }, -+ Partition: 1, -+ }, -+ }, -+ consumeUpto: map[int32]kadm.ListedOffset{ -+ 0: { -+ Offset: 300, -+ }, -+ 1: { -+ Offset: 123, -+ }, -+ }, -+ expectedJobs: []types.Job{ -+ { -+ Partition: 0, -+ Offsets: types.Offsets{Min: 200, Max: 300}, -+ }, -+ { -+ Partition: 1, -+ Offsets: types.Offsets{Min: 12, Max: 123}, -+ }, -+ }, -+ }, -+ { -+ // Interval 2 - run scheduling again -+ // now: 00:48:00. consume until 00:30:00 -+ // last consumed offset 299 -+ // record offset with ts after 00:30:00 - offset 300 -+ // no jobs to schedule for partition 0 -+ name: ""no pending records to consume. schedule second interval once more time"", -+ now: time.Date(0, 0, 0, 0, 48, 0, 0, time.UTC), // 00:48:00 -+ groupLag: map[int32]kadm.GroupMemberLag{ -+ 0: { -+ Commit: kadm.Offset{ -+ At: 299, -+ }, -+ Partition: 0, -+ }, -+ 1: { -+ Commit: kadm.Offset{ -+ At: 11, -+ }, -+ Partition: 1, -+ }, -+ }, -+ consumeUpto: map[int32]kadm.ListedOffset{ -+ 0: { -+ Offset: 300, -+ }, -+ // still pending. 
assume no builder were assigned -+ 1: { -+ Offset: 123, -+ }, -+ }, -+ expectedJobs: []types.Job{ -+ { -+ Partition: 1, -+ Offsets: types.Offsets{Min: 12, Max: 123}, -+ }, -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ mockOffsetReader := &mockOffsetReader{ -+ offsetsAfterMilli: tc.consumeUpto, -+ groupLag: tc.groupLag, -+ } -+ planner := NewTimeRangePlanner(interval, mockOffsetReader, func() time.Time { return tc.now }, log.NewNopLogger()) -+ -+ jobs, err := planner.Plan(context.Background()) -+ require.NoError(t, err) -+ -+ require.Equal(t, len(tc.expectedJobs), len(jobs)) -+ require.Equal(t, tc.expectedJobs, jobs) -+ }) -+ } -+} -+ -+type mockOffsetReader struct { -+ offsetsAfterMilli map[int32]kadm.ListedOffset -+ groupLag map[int32]kadm.GroupMemberLag -+} -+ -+func (m *mockOffsetReader) ListOffsetsAfterMilli(_ context.Context, _ int64) (map[int32]kadm.ListedOffset, error) { -+ return m.offsetsAfterMilli, nil -+} -+ -+func (m *mockOffsetReader) GroupLag(_ context.Context) (map[int32]kadm.GroupMemberLag, error) { -+ return m.groupLag, nil -+} -diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go -index 153387035d6b5..9747a8f231f7e 100644 ---- a/pkg/loki/loki.go -+++ b/pkg/loki/loki.go -@@ -32,6 +32,7 @@ import ( - - ""github.com/grafana/loki/v3/pkg/analytics"" - blockbuilder ""github.com/grafana/loki/v3/pkg/blockbuilder/builder"" -+ blockscheduler ""github.com/grafana/loki/v3/pkg/blockbuilder/scheduler"" - ""github.com/grafana/loki/v3/pkg/bloombuild"" - ""github.com/grafana/loki/v3/pkg/bloomgateway"" - ""github.com/grafana/loki/v3/pkg/compactor"" -@@ -91,6 +92,7 @@ type Config struct { - IngesterClient ingester_client.Config `yaml:""ingester_client,omitempty""` - Ingester ingester.Config `yaml:""ingester,omitempty""` - BlockBuilder blockbuilder.Config `yaml:""block_builder,omitempty""` -+ BlockScheduler blockscheduler.Config `yaml:""block_scheduler,omitempty""` - Pattern pattern.Config `yaml:""pattern_ingester,omitempty""` - IndexGateway indexgateway.Config `yaml:""index_gateway""` - BloomBuild bloombuild.Config `yaml:""bloom_build,omitempty"" category:""experimental""` -@@ -186,6 +188,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { - c.Profiling.RegisterFlags(f) - c.KafkaConfig.RegisterFlags(f) - c.BlockBuilder.RegisterFlags(f) -+ c.BlockScheduler.RegisterFlags(f) - } - - func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) { -@@ -264,6 +267,9 @@ func (c *Config) Validate() error { - if err := c.BlockBuilder.Validate(); err != nil { - errs = append(errs, errors.Wrap(err, ""CONFIG ERROR: invalid block_builder config"")) - } -+ if err := c.BlockScheduler.Validate(); err != nil { -+ errs = append(errs, errors.Wrap(err, ""CONFIG ERROR: invalid block_scheduler config"")) -+ } - if err := c.LimitsConfig.Validate(); err != nil { - errs = append(errs, errors.Wrap(err, ""CONFIG ERROR: invalid limits_config config"")) - } -@@ -379,6 +385,7 @@ type Loki struct { - partitionRingWatcher *ring.PartitionRingWatcher - partitionRing *ring.PartitionInstanceRing - blockBuilder *blockbuilder.BlockBuilder -+ blockScheduler *blockscheduler.BlockScheduler - - ClientMetrics storage.ClientMetrics - deleteClientMetrics *deletion.DeleteRequestClientMetrics -@@ -690,6 +697,7 @@ func (t *Loki) setupModuleManager() error { - mm.RegisterModule(PatternIngester, t.initPatternIngester) - mm.RegisterModule(PartitionRing, t.initPartitionRing, modules.UserInvisibleModule) - mm.RegisterModule(BlockBuilder, t.initBlockBuilder) -+ mm.RegisterModule(BlockScheduler, 
t.initBlockScheduler) - - mm.RegisterModule(All, nil) - mm.RegisterModule(Read, nil) -@@ -728,6 +736,7 @@ func (t *Loki) setupModuleManager() error { - PartitionRing: {MemberlistKV, Server, Ring}, - MemberlistKV: {Server}, - BlockBuilder: {PartitionRing, Store, Server}, -+ BlockScheduler: {Server}, - - Read: {QueryFrontend, Querier}, - Write: {Ingester, Distributor, PatternIngester}, -diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go -index 994576076af3e..c4449f3c51134 100644 ---- a/pkg/loki/modules.go -+++ b/pkg/loki/modules.go -@@ -37,6 +37,7 @@ import ( - - ""github.com/grafana/loki/v3/pkg/analytics"" - blockbuilder ""github.com/grafana/loki/v3/pkg/blockbuilder/builder"" -+ blockscheduler ""github.com/grafana/loki/v3/pkg/blockbuilder/scheduler"" - ""github.com/grafana/loki/v3/pkg/bloombuild/builder"" - ""github.com/grafana/loki/v3/pkg/bloombuild/planner"" - bloomprotos ""github.com/grafana/loki/v3/pkg/bloombuild/protos"" -@@ -49,6 +50,7 @@ import ( - ""github.com/grafana/loki/v3/pkg/distributor"" - ""github.com/grafana/loki/v3/pkg/indexgateway"" - ""github.com/grafana/loki/v3/pkg/ingester"" -+ kclient ""github.com/grafana/loki/v3/pkg/kafka/client"" - ""github.com/grafana/loki/v3/pkg/kafka/partition"" - ""github.com/grafana/loki/v3/pkg/kafka/partitionring"" - ""github.com/grafana/loki/v3/pkg/logproto"" -@@ -139,6 +141,7 @@ const ( - InitCodec string = ""init-codec"" - PartitionRing string = ""partition-ring"" - BlockBuilder string = ""block-builder"" -+ BlockScheduler string = ""block-scheduler"" - ) - - const ( -@@ -1863,6 +1866,22 @@ func (t *Loki) initBlockBuilder() (services.Service, error) { - return t.blockBuilder, nil - } - -+func (t *Loki) initBlockScheduler() (services.Service, error) { -+ logger := log.With(util_log.Logger, ""component"", ""block_scheduler"") -+ -+ clientMetrics := kclient.NewReaderClientMetrics(""block-scheduler"", prometheus.DefaultRegisterer) -+ c, err := kclient.NewReaderClient( -+ t.Cfg.KafkaConfig, -+ clientMetrics, -+ log.With(logger, ""component"", ""kafka-client""), -+ ) -+ if err != nil { -+ return nil, fmt.Errorf(""creating kafka client: %w"", err) -+ } -+ offsetReader := blockscheduler.NewOffsetReader(t.Cfg.KafkaConfig.Topic, t.Cfg.BlockScheduler.ConsumerGroup, t.Cfg.BlockScheduler.LookbackPeriod, c) -+ return blockscheduler.NewScheduler(t.Cfg.BlockScheduler, blockscheduler.NewJobQueue(), offsetReader, logger, prometheus.DefaultRegisterer), nil -+} -+ - func (t *Loki) deleteRequestsClient(clientType string, limits limiter.CombinedLimits) (deletion.DeleteRequestsClient, error) { - if !t.supportIndexDeleteRequest() || !t.Cfg.CompactorConfig.RetentionEnabled { - return deletion.NewNoOpDeleteRequestsStore(), nil",feat,adds service and basic planner support for scheduler (#15200) -0e2ae1ddab174772f7bb283791797e7f1c653f85,2019-11-06 01:03:38,Robert Fratto,"ci: fix drone deploy job (#1232) - -The deploy job stopped working for some unknown reason. 
This commit -fixes it by moving the deploy script to its own file.",False,"diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet -index 764fc20bc5e33..af6530467597d 100644 ---- a/.drone/drone.jsonnet -+++ b/.drone/drone.jsonnet -@@ -43,7 +43,7 @@ local docker(arch, app) = { - }, - }; - --local arch_image(arch,tags='') = { -+local arch_image(arch, tags='') = { - platform: { - os: 'linux', - arch: arch, -@@ -59,8 +59,8 @@ local arch_image(arch,tags='') = { - }], - }; - --local fluentbit() = pipeline('fluent-bit-amd64') + arch_image('amd64','latest,master') { -- steps+: [ -+local fluentbit() = pipeline('fluent-bit-amd64') + arch_image('amd64', 'latest,master') { -+ steps+: [ - // dry run for everything that is not tag or master - docker('amd64', 'fluent-bit') { - depends_on: ['image-tag'], -@@ -69,7 +69,7 @@ local fluentbit() = pipeline('fluent-bit-amd64') + arch_image('amd64','latest,ma - dry_run: true, - repo: 'grafana/fluent-bit-plugin-loki', - }, -- } -+ }, - ] + [ - // publish for tag or master - docker('amd64', 'fluent-bit') { -@@ -78,7 +78,7 @@ local fluentbit() = pipeline('fluent-bit-amd64') + arch_image('amd64','latest,ma - settings+: { - repo: 'grafana/fluent-bit-plugin-loki', - }, -- } -+ }, - ], - depends_on: ['check'], - }; -@@ -130,8 +130,8 @@ local manifest(apps) = pipeline('manifest') { - local drone = [ - pipeline('check') { - workspace: { -- base: ""/src"", -- path: ""loki"" -+ base: '/src', -+ path: 'loki', - }, - steps: [ - make('test', container=false) { depends_on: ['clone'] }, -@@ -144,28 +144,28 @@ local drone = [ - multiarch_image(arch) - for arch in archs - ] + [ -- fluentbit() -+ fluentbit(), - ] + [ - manifest(['promtail', 'loki', 'loki-canary']) { - trigger: condition('include').tagMaster, - }, - ] + [ -- pipeline(""deploy"") { -+ pipeline('deploy') { - trigger: condition('include').tagMaster, -- depends_on: [""manifest""], -+ depends_on: ['manifest'], - steps: [ - { -- name: ""trigger"", -+ name: 'trigger', - image: 'grafana/loki-build-image:%s' % build_image_version, - environment: { -- CIRCLE_TOKEN: {from_secret: ""circle_token""} -+ CIRCLE_TOKEN: { from_secret: 'circle_token' }, - }, - commands: [ -- 'curl -s --header ""Content-Type: application/json"" --data ""{\\""build_parameters\\"": {\\""CIRCLE_JOB\\"": \\""deploy\\"", \\""IMAGE_NAMES\\"": \\""$(make print-images)\\""}}"" --request POST https://circleci.com/api/v1.1/project/github/raintank/deployment_tools/tree/master?circle-token=$CIRCLE_TOKEN' -- ] -- } -+ './tools/deploy.sh', -+ ], -+ }, - ], -- } -+ }, - ]; - - { -diff --git a/.drone/drone.yml b/.drone/drone.yml -index c3dfacb6b19da..33508bb30b7ef 100644 ---- a/.drone/drone.yml -+++ b/.drone/drone.yml -@@ -1,502 +1,500 @@ - kind: pipeline - name: check - steps: --- commands: -- - make BUILD_IN_CONTAINER=false test -- depends_on: -- - clone -- image: grafana/loki-build-image:0.7.4 -- name: test --- commands: -- - make BUILD_IN_CONTAINER=false lint -- depends_on: -- - clone -- image: grafana/loki-build-image:0.7.4 -- name: lint --- commands: -- - make BUILD_IN_CONTAINER=false check-generated-files -- depends_on: -- - clone -- image: grafana/loki-build-image:0.7.4 -- name: check-generated-files --- commands: -- - make BUILD_IN_CONTAINER=false check-mod -- depends_on: -- - clone -- - test -- - lint -- image: grafana/loki-build-image:0.7.4 -- name: check-mod -+ - commands: -+ - make BUILD_IN_CONTAINER=false test -+ depends_on: -+ - clone -+ image: grafana/loki-build-image:0.7.4 -+ name: test -+ - commands: -+ - make BUILD_IN_CONTAINER=false lint -+ 
depends_on: -+ - clone -+ image: grafana/loki-build-image:0.7.4 -+ name: lint -+ - commands: -+ - make BUILD_IN_CONTAINER=false check-generated-files -+ depends_on: -+ - clone -+ image: grafana/loki-build-image:0.7.4 -+ name: check-generated-files -+ - commands: -+ - make BUILD_IN_CONTAINER=false check-mod -+ depends_on: -+ - clone -+ - test -+ - lint -+ image: grafana/loki-build-image:0.7.4 -+ name: check-mod - workspace: - base: /src - path: loki - --- - depends_on: --- check -+ - check - kind: pipeline - name: docker-amd64 - platform: - arch: amd64 - os: linux - steps: --- commands: -- - apk add --no-cache bash git -- - git fetch origin --tags -- - echo $(./tools/image-tag)-amd64 > .tags -- image: alpine -- name: image-tag --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-loki-image -- settings: -- dockerfile: cmd/loki/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/loki -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-loki-canary-image -- settings: -- dockerfile: cmd/loki-canary/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/loki-canary -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-promtail-image -- settings: -- dockerfile: cmd/promtail/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/promtail -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-loki-image -- settings: -- dockerfile: cmd/loki/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/loki -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-loki-canary-image -- settings: -- dockerfile: cmd/loki-canary/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/loki-canary -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-promtail-image -- settings: -- dockerfile: cmd/promtail/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/promtail -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* -+ - commands: -+ - apk add --no-cache bash git -+ - git fetch origin --tags -+ - echo $(./tools/image-tag)-amd64 > .tags -+ image: alpine -+ name: image-tag -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-loki-image -+ settings: -+ dockerfile: cmd/loki/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-loki-canary-image -+ settings: -+ dockerfile: cmd/loki-canary/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/loki-canary -+ username: -+ from_secret: docker_username 
-+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-promtail-image -+ settings: -+ dockerfile: cmd/promtail/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/promtail -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-loki-image -+ settings: -+ dockerfile: cmd/loki/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-loki-canary-image -+ settings: -+ dockerfile: cmd/loki-canary/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/loki-canary -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-promtail-image -+ settings: -+ dockerfile: cmd/promtail/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/promtail -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* - --- - depends_on: --- check -+ - check - kind: pipeline - name: docker-arm64 - platform: - arch: arm64 - os: linux - steps: --- commands: -- - apk add --no-cache bash git -- - git fetch origin --tags -- - echo $(./tools/image-tag)-arm64 > .tags -- image: alpine -- name: image-tag --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-loki-image -- settings: -- dockerfile: cmd/loki/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/loki -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-loki-canary-image -- settings: -- dockerfile: cmd/loki-canary/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/loki-canary -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-promtail-image -- settings: -- dockerfile: cmd/promtail/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/promtail -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-loki-image -- settings: -- dockerfile: cmd/loki/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/loki -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-loki-canary-image -- settings: -- dockerfile: cmd/loki-canary/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/loki-canary -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-promtail-image -- settings: -- dockerfile: 
cmd/promtail/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/promtail -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* -+ - commands: -+ - apk add --no-cache bash git -+ - git fetch origin --tags -+ - echo $(./tools/image-tag)-arm64 > .tags -+ image: alpine -+ name: image-tag -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-loki-image -+ settings: -+ dockerfile: cmd/loki/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-loki-canary-image -+ settings: -+ dockerfile: cmd/loki-canary/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/loki-canary -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-promtail-image -+ settings: -+ dockerfile: cmd/promtail/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/promtail -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-loki-image -+ settings: -+ dockerfile: cmd/loki/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-loki-canary-image -+ settings: -+ dockerfile: cmd/loki-canary/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/loki-canary -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-promtail-image -+ settings: -+ dockerfile: cmd/promtail/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/promtail -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* - --- - depends_on: --- check -+ - check - kind: pipeline - name: docker-arm - platform: - arch: arm - os: linux - steps: --- commands: -- - apk add --no-cache bash git -- - git fetch origin --tags -- - echo $(./tools/image-tag)-arm > .tags -- image: alpine -- name: image-tag --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-loki-image -- settings: -- dockerfile: cmd/loki/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/loki -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-loki-canary-image -- settings: -- dockerfile: cmd/loki-canary/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/loki-canary -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-promtail-image -- settings: -- dockerfile: cmd/promtail/Dockerfile -- dry_run: true -- 
password: -- from_secret: docker_password -- repo: grafana/promtail -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-loki-image -- settings: -- dockerfile: cmd/loki/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/loki -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-loki-canary-image -- settings: -- dockerfile: cmd/loki-canary/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/loki-canary -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-promtail-image -- settings: -- dockerfile: cmd/promtail/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/promtail -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* -+ - commands: -+ - apk add --no-cache bash git -+ - git fetch origin --tags -+ - echo $(./tools/image-tag)-arm > .tags -+ image: alpine -+ name: image-tag -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-loki-image -+ settings: -+ dockerfile: cmd/loki/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-loki-canary-image -+ settings: -+ dockerfile: cmd/loki-canary/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/loki-canary -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-promtail-image -+ settings: -+ dockerfile: cmd/promtail/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/promtail -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-loki-image -+ settings: -+ dockerfile: cmd/loki/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-loki-canary-image -+ settings: -+ dockerfile: cmd/loki-canary/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/loki-canary -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-promtail-image -+ settings: -+ dockerfile: cmd/promtail/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/promtail -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* - --- - depends_on: --- check -+ - check - kind: pipeline - name: fluent-bit-amd64 - platform: - arch: amd64 - os: linux - steps: 
--- commands: -- - apk add --no-cache bash git -- - git fetch origin --tags -- - echo $(./tools/image-tag)-amd64 > .tags -- - echo "",latest,master"" >> .tags -- image: alpine -- name: image-tag --- depends_on: -- - image-tag -- image: plugins/docker -- name: build-fluent-bit-image -- settings: -- dockerfile: cmd/fluent-bit/Dockerfile -- dry_run: true -- password: -- from_secret: docker_password -- repo: grafana/fluent-bit-plugin-loki -- username: -- from_secret: docker_username -- when: -- ref: -- exclude: -- - refs/heads/master -- - refs/tags/v* --- depends_on: -- - image-tag -- image: plugins/docker -- name: publish-fluent-bit-image -- settings: -- dockerfile: cmd/fluent-bit/Dockerfile -- dry_run: false -- password: -- from_secret: docker_password -- repo: grafana/fluent-bit-plugin-loki -- username: -- from_secret: docker_username -- when: -- ref: -- include: -- - refs/heads/master -- - refs/tags/v* -+ - commands: -+ - apk add --no-cache bash git -+ - git fetch origin --tags -+ - echo $(./tools/image-tag)-amd64 > .tags -+ - echo "",latest,master"" >> .tags -+ image: alpine -+ name: image-tag -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: build-fluent-bit-image -+ settings: -+ dockerfile: cmd/fluent-bit/Dockerfile -+ dry_run: true -+ password: -+ from_secret: docker_password -+ repo: grafana/fluent-bit-plugin-loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ exclude: -+ - refs/heads/master -+ - refs/tags/v* -+ - depends_on: -+ - image-tag -+ image: plugins/docker -+ name: publish-fluent-bit-image -+ settings: -+ dockerfile: cmd/fluent-bit/Dockerfile -+ dry_run: false -+ password: -+ from_secret: docker_password -+ repo: grafana/fluent-bit-plugin-loki -+ username: -+ from_secret: docker_username -+ when: -+ ref: -+ include: -+ - refs/heads/master -+ - refs/tags/v* - --- - depends_on: --- docker-amd64 --- docker-arm64 --- docker-arm -+ - docker-amd64 -+ - docker-arm64 -+ - docker-arm - kind: pipeline - name: manifest - steps: --- depends_on: -- - clone -- image: plugins/manifest -- name: manifest-promtail -- settings: -- ignore_missing: true -- password: -- from_secret: docker_password -- spec: .drone/docker-manifest.tmpl -- target: promtail -- username: -- from_secret: docker_username --- depends_on: -- - clone -- image: plugins/manifest -- name: manifest-loki -- settings: -- ignore_missing: true -- password: -- from_secret: docker_password -- spec: .drone/docker-manifest.tmpl -- target: loki -- username: -- from_secret: docker_username --- depends_on: -- - clone -- image: plugins/manifest -- name: manifest-loki-canary -- settings: -- ignore_missing: true -- password: -- from_secret: docker_password -- spec: .drone/docker-manifest.tmpl -- target: loki-canary -- username: -- from_secret: docker_username -+ - depends_on: -+ - clone -+ image: plugins/manifest -+ name: manifest-promtail -+ settings: -+ ignore_missing: true -+ password: -+ from_secret: docker_password -+ spec: .drone/docker-manifest.tmpl -+ target: promtail -+ username: -+ from_secret: docker_username -+ - depends_on: -+ - clone -+ image: plugins/manifest -+ name: manifest-loki -+ settings: -+ ignore_missing: true -+ password: -+ from_secret: docker_password -+ spec: .drone/docker-manifest.tmpl -+ target: loki -+ username: -+ from_secret: docker_username -+ - depends_on: -+ - clone -+ image: plugins/manifest -+ name: manifest-loki-canary -+ settings: -+ ignore_missing: true -+ password: -+ from_secret: docker_password -+ spec: .drone/docker-manifest.tmpl -+ target: loki-canary -+ 
username: -+ from_secret: docker_username - trigger: - ref: - include: -- - refs/heads/master -- - refs/tags/v* -+ - refs/heads/master -+ - refs/tags/v* - --- - depends_on: --- manifest -+ - manifest - kind: pipeline - name: deploy - steps: --- commands: -- - 'curl -s --header ""Content-Type: application/json"" --data ""{\""build_parameters\"": -- {\""CIRCLE_JOB\"": \""deploy\"", \""IMAGE_NAMES\"": \""$(make print-images)\""}}"" --request -- POST https://circleci.com/api/v1.1/project/github/raintank/deployment_tools/tree/master?circle-token=$CIRCLE_TOKEN' -- environment: -- CIRCLE_TOKEN: -- from_secret: circle_token -- image: grafana/loki-build-image:0.7.4 -- name: trigger -+ - commands: -+ - ./tools/deploy.sh -+ environment: -+ CIRCLE_TOKEN: -+ from_secret: circle_token -+ image: grafana/loki-build-image:0.7.4 -+ name: trigger - trigger: - ref: - include: -- - refs/heads/master -- - refs/tags/v* -+ - refs/heads/master -+ - refs/tags/v* -diff --git a/tools/deploy.sh b/tools/deploy.sh -new file mode 100755 -index 0000000000000..d1925396541d1 ---- /dev/null -+++ b/tools/deploy.sh -@@ -0,0 +1,6 @@ -+#!/usr/bin/env bash -+ -+curl -s --header ""Content-Type: application/json"" \ -+ --data ""{\""build_parameters\"":{\""CIRCLE_JOB\"": \""deploy\"", \""IMAGE_NAMES\"": \""$(make print-images)\""}}"" \ -+ --request POST \ -+ https://circleci.com/api/v1.1/project/github/raintank/deployment_tools/tree/master?circle-token=$CIRCLE_TOKEN",ci,"fix drone deploy job (#1232) - -The deploy job stopped working for some unknown reason. This commit -fixes it by moving the deploy script to its own file." -b80f37b72478304ce39d1cb508c76515641dc4e6,2020-10-26 02:39:01,Ed Welch,log any chunk fetch failure (#2804),False,"diff --git a/pkg/storage/batch.go b/pkg/storage/batch.go -index 9fcf5989cc482..471860c0cac2a 100644 ---- a/pkg/storage/batch.go -+++ b/pkg/storage/batch.go -@@ -656,6 +656,7 @@ func fetchLazyChunks(ctx context.Context, chunks []*LazyChunk) error { - } - chks, err := fetcher.FetchChunks(ctx, chks, keys) - if err != nil { -+ level.Error(util.Logger).Log(""msg"", ""error fetching chunks"", ""err"", err) - if isInvalidChunkError(err) { - level.Error(util.Logger).Log(""msg"", ""checksum of chunks does not match"", ""err"", chunk.ErrInvalidChecksum) - errChan <- nil",unknown,log any chunk fetch failure (#2804) -77cf6fa0cc2b0d93cfa7bb88b117def4123d093f,2024-06-20 15:37:09,George Robinson,chore: Update upgrade docs for -ruler.alertmanager-use-v2 (#13264),False,"diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md -index e5abde43173d7..547da559bb1fa 100644 ---- a/docs/sources/setup/upgrade/_index.md -+++ b/docs/sources/setup/upgrade/_index.md -@@ -36,6 +36,8 @@ The output is incredibly verbose as it shows the entire internal config struct u - - ## Main / Unreleased - -+Loki changes the default value of `-ruler.alertmanager-use-v2` from `false` to `true`. Alertmanager APIv1 was deprecated in Alertmanager 0.16.0 and is removed as of 0.27.0. 
-+ - ## 3.0.0 - - {{% admonition type=""note"" %}}",chore,Update upgrade docs for -ruler.alertmanager-use-v2 (#13264) -c0a28cbf30f342b68766c104403fa3bfe5cd56ee,2020-04-24 19:18:08,Aditya C S,Log error message for invalid checksum (#1713),False,"diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go -index 00381176ce5f5..a2faf8fa58308 100644 ---- a/pkg/chunkenc/interface.go -+++ b/pkg/chunkenc/interface.go -@@ -18,7 +18,7 @@ var ( - ErrOutOfOrder = errors.New(""entry out of order"") - ErrInvalidSize = errors.New(""invalid size"") - ErrInvalidFlag = errors.New(""invalid flag"") -- ErrInvalidChecksum = errors.New(""invalid checksum"") -+ ErrInvalidChecksum = errors.New(""invalid chunk checksum"") - ) - - // Encoding is the identifier for a chunk encoding. -diff --git a/pkg/chunkenc/lazy_chunk.go b/pkg/chunkenc/lazy_chunk.go -index 9cfcde600abdb..2e14c48a6f631 100644 ---- a/pkg/chunkenc/lazy_chunk.go -+++ b/pkg/chunkenc/lazy_chunk.go -@@ -15,6 +15,7 @@ import ( - // LazyChunk loads the chunk when it is accessed. - type LazyChunk struct { - Chunk chunk.Chunk -+ IsValid bool - Fetcher *chunk.Fetcher - } - -diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go -index b3f450aa16369..e46fa12191d69 100644 ---- a/pkg/chunkenc/memchunk.go -+++ b/pkg/chunkenc/memchunk.go -@@ -11,6 +11,8 @@ import ( - ""io"" - ""time"" - -+ ""github.com/cortexproject/cortex/pkg/util"" -+ ""github.com/go-kit/kit/log/level"" - ""github.com/pkg/errors"" - - ""github.com/grafana/loki/pkg/iter"" -@@ -226,7 +228,8 @@ func NewByteChunk(b []byte, blockSize, targetSize int) (*MemChunk, error) { - // Verify checksums. - expCRC := binary.BigEndian.Uint32(b[blk.offset+l:]) - if expCRC != crc32.Checksum(blk.b, castagnoliTable) { -- return bc, ErrInvalidChecksum -+ level.Error(util.Logger).Log(""msg"", ""Checksum does not match for a block in chunk, this block will be skipped"", ""err"", ErrInvalidChecksum) -+ continue - } - - bc.blocks = append(bc.blocks, blk) -diff --git a/pkg/storage/iterator.go b/pkg/storage/iterator.go -index 847a4873c12cf..1fb96949b951f 100644 ---- a/pkg/storage/iterator.go -+++ b/pkg/storage/iterator.go -@@ -9,8 +9,10 @@ import ( - ""github.com/cortexproject/cortex/pkg/util"" - ""github.com/cortexproject/cortex/pkg/util/spanlogger"" - ""github.com/go-kit/kit/log/level"" -+ ""github.com/pkg/errors"" - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/pkg/labels"" -+ ""github.com/prometheus/prometheus/promql"" - - ""github.com/grafana/loki/pkg/chunkenc"" - ""github.com/grafana/loki/pkg/iter"" -@@ -199,7 +201,7 @@ func (it *batchChunkIterator) nextBatch() (iter.EntryIterator, error) { - // │ # 47 │ - // └──────────────┘ - // ┌──────────────────────────┐ -- // │ # 48 │ -+ // │ # 48 | - // └──────────────────────────┘ - // ┌──────────────┐ - // │ # 49 │ -@@ -328,10 +330,12 @@ func buildHeapIterator(ctx context.Context, chks [][]*chunkenc.LazyChunk, filter - - // __name__ is only used for upstream compatibility and is hardcoded within loki. Strip it from the return label set. 
- labels := dropLabels(chks[0][0].Chunk.Metric, labels.MetricName).String() -- - for i := range chks { - iterators := make([]iter.EntryIterator, 0, len(chks[i])) - for j := range chks[i] { -+ if !chks[i][j].IsValid { -+ continue -+ } - iterator, err := chks[i][j].Iterator(ctx, from, through, direction, filter) - if err != nil { - return nil, err -@@ -388,7 +392,6 @@ func fetchLazyChunks(ctx context.Context, chunks []*chunkenc.LazyChunk) error { - errChan := make(chan error) - for fetcher, chunks := range chksByFetcher { - go func(fetcher *chunk.Fetcher, chunks []*chunkenc.LazyChunk) { -- - keys := make([]string, 0, len(chunks)) - chks := make([]chunk.Chunk, 0, len(chunks)) - index := make(map[string]*chunkenc.LazyChunk, len(chunks)) -@@ -403,8 +406,14 @@ func fetchLazyChunks(ctx context.Context, chunks []*chunkenc.LazyChunk) error { - } - chks, err := fetcher.FetchChunks(ctx, chks, keys) - if err != nil { -+ if isInvalidChunkError(err) { -+ level.Error(util.Logger).Log(""msg"", ""checksum of chunks does not match"", ""err"", chunk.ErrInvalidChecksum) -+ errChan <- nil -+ return -+ } - errChan <- err - return -+ - } - // assign fetched chunk by key as FetchChunks doesn't guarantee the order. - for _, chk := range chks { -@@ -421,7 +430,25 @@ func fetchLazyChunks(ctx context.Context, chunks []*chunkenc.LazyChunk) error { - lastErr = err - } - } -- return lastErr -+ -+ if lastErr != nil { -+ return lastErr -+ } -+ -+ for _, c := range chunks { -+ if c.Chunk.Data != nil { -+ c.IsValid = true -+ } -+ } -+ return nil -+} -+ -+func isInvalidChunkError(err error) bool { -+ err = errors.Cause(err) -+ if err, ok := err.(promql.ErrStorage); ok { -+ return err.Err == chunk.ErrInvalidChecksum || err.Err == chunkenc.ErrInvalidChecksum -+ } -+ return false - } - - func loadFirstChunks(ctx context.Context, chks map[model.Fingerprint][][]*chunkenc.LazyChunk) error { -diff --git a/pkg/storage/iterator_test.go b/pkg/storage/iterator_test.go -index 2a42251b50f39..bc55edd0c4c75 100644 ---- a/pkg/storage/iterator_test.go -+++ b/pkg/storage/iterator_test.go -@@ -6,8 +6,12 @@ import ( - ""testing"" - ""time"" - -+ ""github.com/cortexproject/cortex/pkg/chunk"" -+ ""github.com/pkg/errors"" - ""github.com/prometheus/prometheus/pkg/labels"" -+ ""github.com/prometheus/prometheus/promql"" - ""github.com/stretchr/testify/require"" -+ ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/chunkenc"" - ""github.com/grafana/loki/pkg/iter"" -@@ -639,6 +643,130 @@ func TestPartitionOverlappingchunks(t *testing.T) { - } - } - -+func TestBuildHeapIterator(t *testing.T) { -+ var ( -+ firstChunk = newLazyChunk(logproto.Stream{ -+ Labels: ""{foo=\""bar\""}"", -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: from, -+ Line: ""1"", -+ }, -+ { -+ Timestamp: from.Add(time.Millisecond), -+ Line: ""2"", -+ }, -+ { -+ Timestamp: from.Add(2 * time.Millisecond), -+ Line: ""3"", -+ }, -+ }, -+ }) -+ secondChunk = newLazyInvalidChunk(logproto.Stream{ -+ Labels: ""{foo=\""bar\""}"", -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: from.Add(3 * time.Millisecond), -+ Line: ""4"", -+ }, -+ { -+ Timestamp: from.Add(4 * time.Millisecond), -+ Line: ""5"", -+ }, -+ }, -+ }) -+ thirdChunk = newLazyChunk(logproto.Stream{ -+ Labels: ""{foo=\""bar\""}"", -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: from.Add(5 * time.Millisecond), -+ Line: ""6"", -+ }, -+ }, -+ }) -+ ) -+ -+ for i, tc := range []struct { -+ input [][]*chunkenc.LazyChunk -+ expected []*logproto.Stream -+ }{ -+ { -+ [][]*chunkenc.LazyChunk{ -+ {firstChunk}, -+ 
{thirdChunk}, -+ }, -+ []*logproto.Stream{ -+ { -+ Labels: ""{foo=\""bar\""}"", -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: from, -+ Line: ""1"", -+ }, -+ { -+ Timestamp: from.Add(time.Millisecond), -+ Line: ""2"", -+ }, -+ { -+ Timestamp: from.Add(2 * time.Millisecond), -+ Line: ""3"", -+ }, -+ { -+ Timestamp: from.Add(5 * time.Millisecond), -+ Line: ""6"", -+ }, -+ }, -+ }, -+ }, -+ }, -+ { -+ [][]*chunkenc.LazyChunk{ -+ {secondChunk}, -+ {firstChunk, thirdChunk}, -+ }, -+ []*logproto.Stream{ -+ { -+ Labels: ""{foo=\""bar\""}"", -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: from, -+ Line: ""1"", -+ }, -+ { -+ Timestamp: from.Add(time.Millisecond), -+ Line: ""2"", -+ }, -+ { -+ Timestamp: from.Add(2 * time.Millisecond), -+ Line: ""3"", -+ }, -+ { -+ Timestamp: from.Add(5 * time.Millisecond), -+ Line: ""6"", -+ }, -+ }, -+ }, -+ }, -+ }, -+ } { -+ t.Run(fmt.Sprintf(""%d"", i), func(t *testing.T) { -+ ctx = user.InjectOrgID(context.Background(), ""test-user"") -+ it, err := buildHeapIterator(ctx, tc.input, nil, logproto.FORWARD, from, from.Add(6*time.Millisecond)) -+ if err != nil { -+ t.Errorf(""buildHeapIterator error = %v"", err) -+ return -+ } -+ req := newQuery(""{foo=\""bar\""}"", from, from.Add(6*time.Millisecond), logproto.FORWARD) -+ streams, _, err := iter.ReadBatch(it, req.Limit) -+ _ = it.Close() -+ if err != nil { -+ t.Fatalf(""error reading batch %s"", err) -+ } -+ assertStream(t, tc.expected, streams.Streams) -+ }) -+ } -+} -+ - func TestDropLabels(t *testing.T) { - - for i, tc := range []struct { -@@ -680,3 +808,36 @@ func TestDropLabels(t *testing.T) { - }) - } - } -+ -+func Test_IsInvalidChunkError(t *testing.T) { -+ tests := []struct { -+ name string -+ err error -+ expectedResult bool -+ }{ -+ { -+ ""invalid chunk cheksum error from cortex"", -+ promql.ErrStorage{Err: chunk.ErrInvalidChecksum}, -+ true, -+ }, -+ { -+ ""invalid chunk cheksum error from loki"", -+ promql.ErrStorage{Err: chunkenc.ErrInvalidChecksum}, -+ true, -+ }, -+ { -+ ""cache error"", -+ promql.ErrStorage{Err: errors.New(""error fetching from cache"")}, -+ false, -+ }, -+ { -+ ""no error from cortex or loki"", -+ nil, -+ false, -+ }, -+ } -+ for _, tc := range tests { -+ result := isInvalidChunkError(tc.err) -+ require.Equal(t, tc.expectedResult, result) -+ } -+} -diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go -index 33cd9105c19d7..03a2953301e2b 100644 ---- a/pkg/storage/store_test.go -+++ b/pkg/storage/store_test.go -@@ -342,12 +342,14 @@ func Test_store_LazyQuery(t *testing.T) { - MaxChunkBatchSize: 10, - }, - } -+ - ctx = user.InjectOrgID(context.Background(), ""test-user"") - it, err := s.LazyQuery(ctx, logql.SelectParams{QueryRequest: tt.req}) - if err != nil { - t.Errorf(""store.LazyQuery() error = %v"", err) - return - } -+ - streams, _, err := iter.ReadBatch(it, tt.req.Limit) - _ = it.Close() - if err != nil { -diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go -index 4370b744dff2d..08fcc67917545 100644 ---- a/pkg/storage/util_test.go -+++ b/pkg/storage/util_test.go -@@ -49,6 +49,15 @@ func assertStream(t *testing.T, expected, actual []*logproto.Stream) { - func newLazyChunk(stream logproto.Stream) *chunkenc.LazyChunk { - return &chunkenc.LazyChunk{ - Fetcher: nil, -+ IsValid: true, -+ Chunk: newChunk(stream), -+ } -+} -+ -+func newLazyInvalidChunk(stream logproto.Stream) *chunkenc.LazyChunk { -+ return &chunkenc.LazyChunk{ -+ Fetcher: nil, -+ IsValid: false, - Chunk: newChunk(stream), - } - } -@@ -119,6 +128,7 @@ func newMockChunkStore(streams 
[]*logproto.Stream) *mockChunkStore { - } - return &mockChunkStore{chunks: chunks, client: &mockChunkStoreClient{chunks: chunks}} - } -+ - func (m *mockChunkStore) Put(ctx context.Context, chunks []chunk.Chunk) error { return nil } - func (m *mockChunkStore) PutOne(ctx context.Context, from, through model.Time, chunk chunk.Chunk) error { - return nil",unknown,Log error message for invalid checksum (#1713) -17390fd45d1f35342f79cba4b33f48a07f9726e9,2020-06-26 17:43:15,Lukas Grimm,"Canary: make stream configurable (#2259) - -* tried to fix canary in non kubernetes env - -* update doc",False,"diff --git a/cmd/loki-canary/main.go b/cmd/loki-canary/main.go -index cb75cf19f5175..40ea88211bec7 100644 ---- a/cmd/loki-canary/main.go -+++ b/cmd/loki-canary/main.go -@@ -32,6 +32,8 @@ func main() { - - lName := flag.String(""labelname"", ""name"", ""The label name for this instance of loki-canary to use in the log selector"") - lVal := flag.String(""labelvalue"", ""loki-canary"", ""The unique label value for this instance of loki-canary to use in the log selector"") -+ sName := flag.String(""streamname"", ""stream"", ""The stream name for this instance of loki-canary to use in the log selector"") -+ sValue := flag.String(""streamvalue"", ""stdout"", ""The unique stream value for this instance of loki-canary to use in the log selector"") - port := flag.Int(""port"", 3500, ""Port which loki-canary should expose metrics"") - addr := flag.String(""addr"", """", ""The Loki server URL:Port, e.g. loki:3100"") - tls := flag.Bool(""tls"", false, ""Does the loki connection use TLS?"") -@@ -69,7 +71,7 @@ func main() { - defer c.lock.Unlock() - - c.writer = writer.NewWriter(os.Stdout, sentChan, *interval, *size) -- c.reader = reader.NewReader(os.Stderr, receivedChan, *tls, *addr, *user, *pass, *lName, *lVal) -+ c.reader = reader.NewReader(os.Stderr, receivedChan, *tls, *addr, *user, *pass, *lName, *lVal, *sName, *sValue) - c.comparator = comparator.NewComparator(os.Stderr, *wait, *pruneInterval, *buckets, sentChan, receivedChan, c.reader, true) - } - -diff --git a/docs/operations/loki-canary.md b/docs/operations/loki-canary.md -index 5709b2f756d77..71b67ff7d8abc 100644 ---- a/docs/operations/loki-canary.md -+++ b/docs/operations/loki-canary.md -@@ -254,10 +254,16 @@ All options: - Frequency to check sent vs received logs, also the frequency which queries for missing logs will be dispatched to loki (default 1m0s) - -size int - Size in bytes of each log line (default 100) -+ -streamname string -+ The stream name for this instance of loki-canary to use in the log selector (default ""stream"") -+ -streamvalue string -+ The unique stream value for this instance of loki-canary to use in the log selector (default ""stdout"") - -tls - Does the loki connection use TLS? 
- -user string - Loki username -+ -version -+ Print this builds version information - -wait duration - Duration to wait for log entries before reporting them lost (default 1m0s) - ``` -diff --git a/pkg/canary/reader/reader.go b/pkg/canary/reader/reader.go -index ec6e9f03803a2..bc427e9f0edcc 100644 ---- a/pkg/canary/reader/reader.go -+++ b/pkg/canary/reader/reader.go -@@ -42,6 +42,8 @@ type Reader struct { - addr string - user string - pass string -+ sName string -+ sValue string - lName string - lVal string - conn *websocket.Conn -@@ -53,7 +55,7 @@ type Reader struct { - } - - func NewReader(writer io.Writer, receivedChan chan time.Time, tls bool, -- address string, user string, pass string, labelName string, labelVal string) *Reader { -+ address string, user string, pass string, labelName string, labelVal string, streamName string, streamValue string) *Reader { - h := http.Header{} - if user != """" { - h = http.Header{""Authorization"": {""Basic "" + base64.StdEncoding.EncodeToString([]byte(user+"":""+pass))}} -@@ -65,6 +67,8 @@ func NewReader(writer io.Writer, receivedChan chan time.Time, tls bool, - addr: address, - user: user, - pass: pass, -+ sName: streamName, -+ sValue: streamValue, - lName: labelName, - lVal: labelVal, - w: writer, -@@ -106,7 +110,7 @@ func (r *Reader) Query(start time.Time, end time.Time) ([]time.Time, error) { - Host: r.addr, - Path: ""/api/prom/query"", - RawQuery: fmt.Sprintf(""start=%d&end=%d"", start.UnixNano(), end.UnixNano()) + -- ""&query="" + url.QueryEscape(fmt.Sprintf(""{stream=\""stdout\"",%v=\""%v\""}"", r.lName, r.lVal)) + -+ ""&query="" + url.QueryEscape(fmt.Sprintf(""{%v=\""%v\"",%v=\""%v\""}"", r.sName, r.sValue, r.lName, r.lVal)) + - ""&limit=1000"", - } - fmt.Fprintf(r.w, ""Querying loki for missing values with query: %v\n"", u.String()) -@@ -206,7 +210,7 @@ func (r *Reader) closeAndReconnect() { - Scheme: scheme, - Host: r.addr, - Path: ""/api/prom/tail"", -- RawQuery: ""query="" + url.QueryEscape(fmt.Sprintf(""{stream=\""stdout\"",%v=\""%v\""}"", r.lName, r.lVal)), -+ RawQuery: ""query="" + url.QueryEscape(fmt.Sprintf(""{%v=\""%v\"",%v=\""%v\""}"", r.sName, r.sValue, r.lName, r.lVal)), - } - - fmt.Fprintf(r.w, ""Connecting to loki at %v, querying for label '%v' with value '%v'\n"", u.String(), r.lName, r.lVal)",Canary,"make stream configurable (#2259) - -* tried to fix canary in non kubernetes env - -* update doc" -96b4e8fd67bf76e5edab785f6637146079017fa5,2025-02-17 14:26:34,renovate[bot],"fix(deps): update dependency @radix-ui/react-tabs to v1.1.3 (main) (#16316) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json -index f7cff9627fa4b..da60c332c91a9 100644 ---- a/pkg/ui/frontend/package-lock.json -+++ b/pkg/ui/frontend/package-lock.json -@@ -2796,9 +2796,9 @@ - } - }, - ""node_modules/@radix-ui/react-tabs"": { -- ""version"": ""1.1.2"", -- ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.2.tgz"", -- ""integrity"": ""sha512-9u/tQJMcC2aGq7KXpGivMm1mgq7oRJKXphDwdypPd/j21j/2znamPU8WkXgnhUaTrSFNIt8XhOyCAupg8/GbwQ=="", -+ ""version"": ""1.1.3"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.3.tgz"", -+ ""integrity"": ""sha512-9mFyI30cuRDImbmFF6O2KUJdgEOsGh9Vmx9x/Dh9tOhL7BngmQPQfwW4aejKm5OHpfWIdmeV6ySyuxoOGjtNng=="", - ""license"": ""MIT"", - ""dependencies"": { - ""@radix-ui/primitive"": ""1.1.1"", -@@ -2806,8 +2806,88 @@ - ""@radix-ui/react-direction"": 
""1.1.0"", - ""@radix-ui/react-id"": ""1.1.0"", - ""@radix-ui/react-presence"": ""1.1.2"", -- ""@radix-ui/react-primitive"": ""2.0.1"", -- ""@radix-ui/react-roving-focus"": ""1.1.1"", -+ ""@radix-ui/react-primitive"": ""2.0.2"", -+ ""@radix-ui/react-roving-focus"": ""1.1.2"", -+ ""@radix-ui/react-use-controllable-state"": ""1.1.0"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-collection"": { -+ ""version"": ""1.1.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.2.tgz"", -+ ""integrity"": ""sha512-9z54IEKRxIa9VityapoEYMuByaG42iSy1ZXlY2KcuLSEtq8x4987/N6m15ppoMffgZX72gER2uHe1D9Y6Unlcw=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-compose-refs"": ""1.1.1"", -+ ""@radix-ui/react-context"": ""1.1.1"", -+ ""@radix-ui/react-primitive"": ""2.0.2"", -+ ""@radix-ui/react-slot"": ""1.1.2"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-primitive"": { -+ ""version"": ""2.0.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.2.tgz"", -+ ""integrity"": ""sha512-Ec/0d38EIuvDF+GZjcMU/Ze6MxntVJYO/fRlCPhCaVUyPY9WTalHJw54tp9sXeJo3tlShWpy41vQRgLRGOuz+w=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/react-slot"": ""1.1.2"" -+ }, -+ ""peerDependencies"": { -+ ""@types/react"": ""*"", -+ ""@types/react-dom"": ""*"", -+ ""react"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"", -+ ""react-dom"": ""^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"" -+ }, -+ ""peerDependenciesMeta"": { -+ ""@types/react"": { -+ ""optional"": true -+ }, -+ ""@types/react-dom"": { -+ ""optional"": true -+ } -+ } -+ }, -+ ""node_modules/@radix-ui/react-tabs/node_modules/@radix-ui/react-roving-focus"": { -+ ""version"": ""1.1.2"", -+ ""resolved"": ""https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.2.tgz"", -+ ""integrity"": ""sha512-zgMQWkNO169GtGqRvYrzb0Zf8NhMHS2DuEB/TiEmVnpr5OqPU3i8lfbxaAmC2J/KYuIQxyoQQ6DxepyXp61/xw=="", -+ ""license"": ""MIT"", -+ ""dependencies"": { -+ ""@radix-ui/primitive"": ""1.1.1"", -+ ""@radix-ui/react-collection"": ""1.1.2"", -+ ""@radix-ui/react-compose-refs"": ""1.1.1"", -+ ""@radix-ui/react-context"": ""1.1.1"", -+ ""@radix-ui/react-direction"": ""1.1.0"", -+ ""@radix-ui/react-id"": ""1.1.0"", -+ ""@radix-ui/react-primitive"": ""2.0.2"", -+ ""@radix-ui/react-use-callback-ref"": ""1.1.0"", - ""@radix-ui/react-use-controllable-state"": ""1.1.0"" - }, - ""peerDependencies"": {",fix,"update dependency @radix-ui/react-tabs to v1.1.3 (main) (#16316) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -4d3f9f5a7b483b563348c322958486825d314526,2024-10-29 14:18:42,Jason Tackaberry,"feat(logcli): add gzip 
compression option (#14598) - -Passing `--compress` to logcli will enable (or more accurately not disable) compression on the `http.Transport`, allowing Loki to return gzip-compressed payloads. - -This improves overall execution time and reduces data transfer by 10-15x. - -Signed-off-by: Jason Tackaberry ",False,"diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go -index 976395cd4f420..e3e4034ce479d 100644 ---- a/cmd/logcli/main.go -+++ b/cmd/logcli/main.go -@@ -483,6 +483,7 @@ func newQueryClient(app *kingpin.Application) client.Client { - app.Flag(""max-backoff"", ""Maximum backoff time between retries. Can also be set using LOKI_CLIENT_MAX_BACKOFF env var."").Default(""0"").Envar(""LOKI_CLIENT_MAX_BACKOFF"").IntVar(&client.BackoffConfig.MaxBackoff) - app.Flag(""auth-header"", ""The authorization header used. Can also be set using LOKI_AUTH_HEADER env var."").Default(""Authorization"").Envar(""LOKI_AUTH_HEADER"").StringVar(&client.AuthHeader) - app.Flag(""proxy-url"", ""The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var."").Default("""").Envar(""LOKI_HTTP_PROXY_URL"").StringVar(&client.ProxyURL) -+ app.Flag(""compress"", ""Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var."").Default(""false"").Envar(""LOKI_HTTP_COMPRESSION"").BoolVar(&client.Compression) - - return client - } -diff --git a/docs/sources/query/logcli.md b/docs/sources/query/logcli.md -index 9a7d5b18a6d09..ee282ac73a6de 100644 ---- a/docs/sources/query/logcli.md -+++ b/docs/sources/query/logcli.md -@@ -371,6 +371,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - --limit=30 Limit on number of entries to print. Setting it to 0 will fetch all entries. - --since=1h Lookback window. - --from=FROM Start looking for logs at this absolute time (inclusive) -@@ -465,6 +466,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - --limit=30 Limit on number of entries to print. Setting it to 0 will fetch all entries. - --now=NOW Time at which to execute the instant query. - --forward Scan forwards through logs. -@@ -525,6 +527,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - --since=1h Lookback window. - --from=FROM Start looking for labels at this absolute time (inclusive) - --to=TO Stop looking for labels at this absolute time (exclusive) -@@ -581,6 +584,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. 
Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - --since=1h Lookback window. - --from=FROM Start looking for logs at this absolute time (inclusive) - --to=TO Stop looking for logs at this absolute time (exclusive) -@@ -633,6 +637,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - ``` - - ### `stats` command reference -@@ -694,6 +699,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - --since=1h Lookback window. - --from=FROM Start looking for logs at this absolute time (inclusive) - --to=TO Stop looking for logs at this absolute time (exclusive) -@@ -761,6 +767,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - --since=1h Lookback window. - --from=FROM Start looking for logs at this absolute time (inclusive) - --to=TO Stop looking for logs at this absolute time (exclusive) -@@ -833,6 +840,7 @@ Flags: - --auth-header=""Authorization"" - The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. - --proxy-url="""" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. -+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var. - --since=1h Lookback window. - --from=FROM Start looking for logs at this absolute time (inclusive) - --to=TO Stop looking for logs at this absolute time (exclusive) -diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go -index f2d42b353f969..1ffbfdedf0157 100644 ---- a/pkg/logcli/client/client.go -+++ b/pkg/logcli/client/client.go -@@ -88,6 +88,7 @@ type DefaultClient struct { - AuthHeader string - ProxyURL string - BackoffConfig BackoffConfig -+ Compression bool - } - - // Query uses the /api/v1/query endpoint to execute an instant query -@@ -320,6 +321,16 @@ func (c *DefaultClient) doRequest(path, query string, quiet bool, out interface{ - if c.Tripperware != nil { - client.Transport = c.Tripperware(client.Transport) - } -+ if c.Compression { -+ // NewClientFromConfig() above returns an http.Client that uses a transport which -+ // has compression explicitly disabled. Here we re-enable it. If the caller -+ // defines a custom Tripperware that isn't an http.Transport then this won't work, -+ // but in that case they control the transport anyway and can configure -+ // compression that way. 
-+ if transport, ok := client.Transport.(*http.Transport); ok { -+ transport.DisableCompression = false -+ } -+ } - - var resp *http.Response",feat,"add gzip compression option (#14598) - -Passing `--compress` to logcli will enable (or more accurately not disable) compression on the `http.Transport`, allowing Loki to return gzip-compressed payloads. - -This improves overall execution time and reduces data transfer by 10-15x. - -Signed-off-by: Jason Tackaberry " -895165f770d069c113acd1401c15fd5fd6659ad3,2021-09-29 00:36:03,Mathew Fleisch,Add darwin/arm64 build to release binaries in Makefile (#4189),False,"diff --git a/Makefile b/Makefile -index 4402be8257fbb..69c65985483e5 100644 ---- a/Makefile -+++ b/Makefile -@@ -248,10 +248,10 @@ cmd/migrate/migrate: $(APP_GO_FILES) cmd/migrate/main.go - GOX = gox $(GO_FLAGS) -parallel=2 -output=""dist/{{.Dir}}-{{.OS}}-{{.Arch}}"" - CGO_GOX = gox $(DYN_GO_FLAGS) -cgo -parallel=2 -output=""dist/{{.Dir}}-{{.OS}}-{{.Arch}}"" - dist: clean -- CGO_ENABLED=0 $(GOX) -osarch=""linux/amd64 linux/arm64 linux/arm darwin/amd64 windows/amd64 freebsd/amd64"" ./cmd/loki -- CGO_ENABLED=0 $(GOX) -osarch=""linux/amd64 linux/arm64 linux/arm darwin/amd64 windows/amd64 freebsd/amd64"" ./cmd/logcli -- CGO_ENABLED=0 $(GOX) -osarch=""linux/amd64 linux/arm64 linux/arm darwin/amd64 windows/amd64 freebsd/amd64"" ./cmd/loki-canary -- CGO_ENABLED=0 $(GOX) -osarch=""linux/arm64 linux/arm darwin/amd64 windows/amd64 windows/386 freebsd/amd64"" ./clients/cmd/promtail -+ CGO_ENABLED=0 $(GOX) -osarch=""linux/amd64 linux/arm64 linux/arm darwin/amd64 darwin/arm64 windows/amd64 freebsd/amd64"" ./cmd/loki -+ CGO_ENABLED=0 $(GOX) -osarch=""linux/amd64 linux/arm64 linux/arm darwin/amd64 darwin/arm64 windows/amd64 freebsd/amd64"" ./cmd/logcli -+ CGO_ENABLED=0 $(GOX) -osarch=""linux/amd64 linux/arm64 linux/arm darwin/amd64 darwin/arm64 windows/amd64 freebsd/amd64"" ./cmd/loki-canary -+ CGO_ENABLED=0 $(GOX) -osarch=""linux/arm64 linux/arm darwin/amd64 darwin/arm64 windows/amd64 windows/386 freebsd/amd64"" ./clients/cmd/promtail - CGO_ENABLED=1 $(CGO_GOX) -osarch=""linux/amd64"" ./clients/cmd/promtail - for i in dist/*; do zip -j -m $$i.zip $$i; done - pushd dist && sha256sum * > SHA256SUMS && popd",unknown,Add darwin/arm64 build to release binaries in Makefile (#4189) -7ec072dbacb208d9e6fc96d1658a7df39088922a,2022-11-04 23:08:45,Periklis Tsirakidis,operator: Add TLS support for compactor client (#7448),False,"diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md -index 71fd203b1d625..2f93223d72d85 100644 ---- a/operator/CHANGELOG.md -+++ b/operator/CHANGELOG.md -@@ -1,5 +1,6 @@ - ## Main - -+- [7448](https://github.com/grafana/loki/pull/7448) **periklis**: Add TLS support for compactor delete client - - [7596](https://github.com/grafana/loki/pull/7596) **periklis**: Fix fresh-installs with built-in cert management enabled - - [7064](https://github.com/grafana/loki/pull/7064) **periklis**: Add support for built-in cert management - - [7471](https://github.com/grafana/loki/pull/7471) **aminesnow**: Expose and migrate query_timeout in limits config -diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go -index be11bf7d2ce67..e3023d72c23ef 100644 ---- a/operator/internal/manifests/querier.go -+++ b/operator/internal/manifests/querier.go -@@ -225,6 +225,14 @@ func configureQuerierHTTPServicePKI(deployment *appsv1.Deployment, opts Options) - func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error { - secretContainerSpec := 
corev1.Container{ - Args: []string{ -+ // Enable HTTP over TLS for compactor delete client -+ ""-boltdb.shipper.compactor.client.tls-enabled=true"", -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-cipher-suites=%s"", opts.TLSCipherSuites()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-min-version=%s"", opts.TLSProfile.MinTLSVersion), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-ca-path=%s"", signingCAPath()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-cert-path=%s"", lokiServerGRPCTLSCert()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-key-path=%s"", lokiServerGRPCTLSKey()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-server-name=%s"", fqdn(serviceNameCompactorHTTP(opts.Name), opts.Namespace)), - // Enable GRPC over TLS for ingester client - ""-ingester.client.tls-enabled=true"", - fmt.Sprintf(""-ingester.client.tls-cipher-suites=%s"", opts.TLSCipherSuites()), -diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go -index 6bdf510348b3b..36a3b6cf0a295 100644 ---- a/operator/internal/manifests/ruler.go -+++ b/operator/internal/manifests/ruler.go -@@ -301,6 +301,14 @@ func configureRulerHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) - func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error { - secretContainerSpec := corev1.Container{ - Args: []string{ -+ // Enable HTTP over TLS for compactor delete client -+ ""-boltdb.shipper.compactor.client.tls-enabled=true"", -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-cipher-suites=%s"", opts.TLSCipherSuites()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-min-version=%s"", opts.TLSProfile.MinTLSVersion), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-ca-path=%s"", signingCAPath()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-cert-path=%s"", lokiServerGRPCTLSCert()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-key-path=%s"", lokiServerGRPCTLSKey()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-server-name=%s"", fqdn(serviceNameCompactorHTTP(opts.Name), opts.Namespace)), - // Enable GRPC over TLS for boltb-shipper index-gateway client - ""-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true"", - fmt.Sprintf(""-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s"", opts.TLSCipherSuites()), -diff --git a/operator/internal/manifests/service_test.go b/operator/internal/manifests/service_test.go -index 4f935b5bfd5d1..c0600301407e6 100644 ---- a/operator/internal/manifests/service_test.go -+++ b/operator/internal/manifests/service_test.go -@@ -653,6 +653,13 @@ func TestServices_WithEncryption(t *testing.T) { - fmt.Sprintf(""-querier.frontend-client.tls-server-name=%s"", fqdn(serviceNameQueryFrontendGRPC(stackName), stackNs)), - ""-querier.frontend-client.tls-min-version=VersionTLS12"", - ""-querier.frontend-client.tls-cipher-suites=cipher1,cipher2"", -+ ""-boltdb.shipper.compactor.client.tls-enabled=true"", -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-ca-path=%s"", signingCAPath()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-cert-path=%s"", lokiServerGRPCTLSCert()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-key-path=%s"", lokiServerGRPCTLSKey()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-server-name=%s"", fqdn(serviceNameCompactorHTTP(stackName), stackNs)), -+ ""-boltdb.shipper.compactor.client.tls-min-version=VersionTLS12"", -+ ""-boltdb.shipper.compactor.client.tls-cipher-suites=cipher1,cipher2"", - 
""-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true"", - fmt.Sprintf(""-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s"", signingCAPath()), - fmt.Sprintf(""-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s"", lokiServerGRPCTLSCert()), -@@ -814,6 +821,13 @@ func TestServices_WithEncryption(t *testing.T) { - desc: ""ruler"", - buildFunc: BuildRuler, - wantArgs: []string{ -+ ""-boltdb.shipper.compactor.client.tls-enabled=true"", -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-ca-path=%s"", signingCAPath()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-cert-path=%s"", lokiServerGRPCTLSCert()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-key-path=%s"", lokiServerGRPCTLSKey()), -+ fmt.Sprintf(""-boltdb.shipper.compactor.client.tls-server-name=%s"", fqdn(serviceNameCompactorHTTP(stackName), stackNs)), -+ ""-boltdb.shipper.compactor.client.tls-min-version=VersionTLS12"", -+ ""-boltdb.shipper.compactor.client.tls-cipher-suites=cipher1,cipher2"", - ""-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true"", - fmt.Sprintf(""-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s"", signingCAPath()), - fmt.Sprintf(""-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s"", lokiServerGRPCTLSCert()),",operator,Add TLS support for compactor client (#7448) -25aed2b88993ac633e7563aec075c18d0fa9a5df,2020-04-13 21:58:24,Cyril Tovena,"Add no-file and keep-file log option for docker driver. (#1906) - -* Add no-file and keep-file option. - -By default no-file is false, this means the driver will create json log files that can be used to run `docker logs` command. -If no-file is true, no files are created and so `docker log` won't work. This allow the ability to run the driver without caring of space left on the host. - -keep-file tells the driver to keep file even after the container has been stopped, by default this options is false. This means files are cleaned up once containers are stopped and not history is left to be used for `docker logs`. - -Signed-off-by: Cyril Tovena - -* Review feedback ! - -Signed-off-by: Cyril Tovena ",False,"diff --git a/cmd/docker-driver/README.md b/cmd/docker-driver/README.md -index 5e2f2a6b99267..2fbab6cfda497 100644 ---- a/cmd/docker-driver/README.md -+++ b/cmd/docker-driver/README.md -@@ -49,6 +49,7 @@ docker run --log-driver=loki \ - - > **Note**: The Loki logging driver still uses the json-log driver in combination with sending logs to Loki, this is mainly useful to keep the `docker logs` command working. - > You can adjust file size and rotation using the respective log option `max-size` and `max-file`. -+> You can deactivate this behavior by setting the log option `no-file` to true. - - ### Configure the default logging driver - -@@ -115,29 +116,31 @@ You can add more labels by using `loki-external-labels`,`loki-pipeline-stage-fil - - To specify additional logging driver options, you can use the --log-opt NAME=VALUE flag. - --| Option | Required? | Default Value | Description --| ------------------------- | :-------: | :------------------: | -------------------------------------- | --| `loki-url` | Yes | | Loki HTTP push endpoint. --| `loki-external-labels` | No | `container_name={{.Name}}` | Additional label value pair separated by `,` to send with logs. The value is expanded with the [Docker tag template format](https://docs.docker.com/config/containers/logging/log_tags/). 
(eg: `container_name={{.ID}}.{{.Name}},cluster=prod`) --| `loki-timeout` | No | `10s` | The timeout to use when sending logs to the Loki instance. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". --| `loki-batch-wait` | No | `1s` | The amount of time to wait before sending a log batch complete or not. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". --| `loki-batch-size` | No | `102400` | The maximum size of a log batch to send. --| `loki-min-backoff` | No | `100ms` | The minimum amount of time to wait before retrying a batch. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". --| `loki-max-backoff` | No | `10s` | The maximum amount of time to wait before retrying a batch. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". --| `loki-retries` | No | `10` | The maximum amount of retries for a log batch. --| `loki-pipeline-stage-file` | No | | The location of a pipeline stage configuration file ([example](./pipeline-example.yaml)). Pipeline stages allows to parse log lines to extract more labels. [see documentation](../../docs/logentry/processing-log-lines.md) --| `loki-tenant-id` | No | | Set the tenant id (http header`X-Scope-OrgID`) when sending logs to Loki. It can be overrides by a pipeline stage. --| `loki-tls-ca-file` | No | | Set the path to a custom certificate authority. --| `loki-tls-cert-file` | No | | Set the path to a client certificate file. --| `loki-tls-key-file` | No | | Set the path to a client key. --| `loki-tls-server-name` | No | | Name used to validate the server certificate. --| `loki-tls-insecure-skip-verify` | No | `false` | Allow to skip tls verification. --| `loki-proxy-url` | No | | Proxy URL use to connect to Loki. --| `max-size` | No | -1 | The maximum size of the log before it is rolled. A positive integer plus a modifier representing the unit of measure (k, m, or g). Defaults to -1 (unlimited). This is used by json-log required to keep the `docker log` command working. --| `max-file` | No | 1 | The maximum number of log files that can be present. If rolling the logs creates excess files, the oldest file is removed. Only effective when max-size is also set. A positive integer. Defaults to 1. --| `labels` | No | | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for container. --| `env` | No | | Comma-separated list of keys of environment variables to be included in message if they specified for a container. --| `env-regex` | No | | A regular expression to match logging-related environment variables. Used for advanced log label options. If there is collision between the label and env keys, the value of the env takes precedence. Both options add additional fields to the labels of a logging message. -+| Option | Required? | Default Value | Description | -+|---------------------------------|:---------:|:--------------------------:|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -+| `loki-url` | Yes | | Loki HTTP push endpoint. | -+| `loki-external-labels` | No | `container_name={{.Name}}` | Additional label value pair separated by `,` to send with logs. The value is expanded with the [Docker tag template format](https://docs.docker.com/config/containers/logging/log_tags/). 
(eg: `container_name={{.ID}}.{{.Name}},cluster=prod`) | -+| `loki-timeout` | No | `10s` | The timeout to use when sending logs to the Loki instance. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". | -+| `loki-batch-wait` | No | `1s` | The amount of time to wait before sending a log batch complete or not. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". | -+| `loki-batch-size` | No | `102400` | The maximum size of a log batch to send. | -+| `loki-min-backoff` | No | `100ms` | The minimum amount of time to wait before retrying a batch. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". | -+| `loki-max-backoff` | No | `10s` | The maximum amount of time to wait before retrying a batch. Valid time units are ""ns"", ""us"" (or ""µs""), ""ms"", ""s"", ""m"", ""h"". | -+| `loki-retries` | No | `10` | The maximum amount of retries for a log batch. | -+| `loki-pipeline-stage-file` | No | | The location of a pipeline stage configuration file ([example](./pipeline-example.yaml)). Pipeline stages allows to parse log lines to extract more labels. [see documentation](../../docs/logentry/processing-log-lines.md) | -+| `loki-tenant-id` | No | | Set the tenant id (http header`X-Scope-OrgID`) when sending logs to Loki. It can be overrides by a pipeline stage. | -+| `loki-tls-ca-file` | No | | Set the path to a custom certificate authority. | -+| `loki-tls-cert-file` | No | | Set the path to a client certificate file. | -+| `loki-tls-key-file` | No | | Set the path to a client key. | -+| `loki-tls-server-name` | No | | Name used to validate the server certificate. | -+| `loki-tls-insecure-skip-verify` | No | `false` | Allow to skip tls verification. | -+| `loki-proxy-url` | No | | Proxy URL use to connect to Loki. | -+| `no-file` | No | `false` | This indicates the driver to not create log files on disk, however this means you won't be able to use `docker logs` on the container anymore. You can use this if you don't need to use `docker logs` and you run with limited disk space. (By default files are created) | -+| `keep-file` | No | `false` | This indicates the driver to keep json log files once the container is stopped. By default files are removed, this means you won't be able to use `docker logs` once the container is stopped. | -+| `max-size` | No | -1 | The maximum size of the log before it is rolled. A positive integer plus a modifier representing the unit of measure (k, m, or g). Defaults to -1 (unlimited). This is used by json-log required to keep the `docker log` command working. | -+| `max-file` | No | 1 | The maximum number of log files that can be present. If rolling the logs creates excess files, the oldest file is removed. Only effective when max-size is also set. A positive integer. Defaults to 1. | -+| `labels` | No | | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for container. | -+| `env` | No | | Comma-separated list of keys of environment variables to be included in message if they specified for a container. | -+| `env-regex` | No | | A regular expression to match logging-related environment variables. Used for advanced log label options. If there is collision between the label and env keys, the value of the env takes precedence. Both options add additional fields to the labels of a logging message. 
| - - ## Uninstall the plugin - -diff --git a/cmd/docker-driver/config.go b/cmd/docker-driver/config.go -index 67d93a421a77a..5275084f9e7b0 100644 ---- a/cmd/docker-driver/config.go -+++ b/cmd/docker-driver/config.go -@@ -40,6 +40,8 @@ const ( - cfgMaxRetriesKey = ""loki-retries"" - cfgPipelineStagesKey = ""loki-pipeline-stage-file"" - cfgTenantIDKey = ""loki-tenant-id"" -+ cfgNofile = ""no-file"" -+ cfgKeepFile = ""keep-file"" - - swarmServiceLabelKey = ""com.docker.swarm.service.name"" - swarmStackLabelKey = ""com.docker.stack.namespace"" -@@ -101,6 +103,8 @@ func validateDriverOpt(loggerInfo logger.Info) error { - case cfgMaxRetriesKey: - case cfgPipelineStagesKey: - case cfgTenantIDKey: -+ case cfgNofile: -+ case cfgKeepFile: - case ""labels"": - case ""env"": - case ""env-regex"": -@@ -321,3 +325,15 @@ func parseInt(key string, logCtx logger.Info, set func(i int)) error { - } - return nil - } -+ -+func parseBoolean(key string, logCtx logger.Info, defaultValue bool) (bool, error) { -+ value, ok := logCtx.Config[key] -+ if !ok || value == """" { -+ return defaultValue, nil -+ } -+ b, err := strconv.ParseBool(value) -+ if err != nil { -+ return false, err -+ } -+ return b, nil -+} -diff --git a/cmd/docker-driver/driver.go b/cmd/docker-driver/driver.go -index f99e5c33412fa..bbc99ac20c66e 100644 ---- a/cmd/docker-driver/driver.go -+++ b/cmd/docker-driver/driver.go -@@ -36,6 +36,10 @@ type logPair struct { - stream io.ReadCloser - info logger.Info - logger log.Logger -+ // folder where json log files will be created. -+ folder string -+ // keep created files after stopping the container. -+ keepFile bool - } - - func (l *logPair) Close() { -@@ -45,6 +49,9 @@ func (l *logPair) Close() { - if err := l.lokil.Close(); err != nil { - level.Error(l.logger).Log(""msg"", ""error while closing loki logger"", ""err"", err) - } -+ if l.jsonl == nil { -+ return -+ } - if err := l.jsonl.Close(); err != nil { - level.Error(l.logger).Log(""msg"", ""error while closing json logger"", ""err"", err) - } -@@ -65,16 +72,29 @@ func (d *driver) StartLogging(file string, logCtx logger.Info) error { - return fmt.Errorf(""logger for %q already exists"", file) - } - d.mu.Unlock() -+ folder := fmt.Sprintf(""/var/log/docker/%s/"", logCtx.ContainerID) -+ logCtx.LogPath = filepath.Join(folder, ""json.log"") - -- if logCtx.LogPath == """" { -- logCtx.LogPath = filepath.Join(""/var/log/docker"", logCtx.ContainerID) -- } -- if err := os.MkdirAll(filepath.Dir(logCtx.LogPath), 0755); err != nil { -- return errors.Wrap(err, ""error setting up logger dir"") -+ noFile, err := parseBoolean(cfgNofile, logCtx, false) -+ if err != nil { -+ return err - } -- jsonl, err := jsonfilelog.New(logCtx) -+ -+ keepFile, err := parseBoolean(cfgKeepFile, logCtx, false) - if err != nil { -- return errors.Wrap(err, ""error creating jsonfile logger"") -+ return err -+ } -+ -+ var jsonl logger.Logger -+ if !noFile { -+ if err := os.MkdirAll(folder, 0755); err != nil { -+ return errors.Wrap(err, ""error setting up logger dir"") -+ } -+ -+ jsonl, err = jsonfilelog.New(logCtx) -+ if err != nil { -+ return errors.Wrap(err, ""error creating jsonfile logger"") -+ } - } - - lokil, err := New(logCtx, d.logger) -@@ -88,7 +108,7 @@ func (d *driver) StartLogging(file string, logCtx logger.Info) error { - } - - d.mu.Lock() -- lf := &logPair{jsonl, lokil, f, logCtx, d.logger} -+ lf := &logPair{jsonl, lokil, f, logCtx, d.logger, folder, keepFile} - d.logs[file] = lf - d.idx[logCtx.ContainerID] = lf - d.mu.Unlock() -@@ -100,12 +120,19 @@ func (d *driver) 
StartLogging(file string, logCtx logger.Info) error { - func (d *driver) StopLogging(file string) { - level.Debug(d.logger).Log(""msg"", ""Stop logging"", ""file"", file) - d.mu.Lock() -+ defer d.mu.Unlock() - lf, ok := d.logs[file] -- if ok { -- lf.Close() -- delete(d.logs, file) -+ if !ok { -+ return -+ } -+ lf.Close() -+ delete(d.logs, file) -+ if !lf.keepFile && lf.jsonl != nil { -+ // delete the folder where all log files were created. -+ if err := os.RemoveAll(lf.folder); err != nil { -+ level.Debug(d.logger).Log(""msg"", ""error deleting folder"", ""folder"", lf.folder) -+ } - } -- d.mu.Unlock() - } - - func consumeLog(lf *logPair) { -@@ -138,10 +165,11 @@ func consumeLog(lf *logPair) { - if err := lf.lokil.Log(&msg); err != nil { - level.Error(lf.logger).Log(""msg"", ""error pushing message to loki"", ""id"", lf.info.ContainerID, ""err"", err, ""message"", msg) - } -- -- if err := lf.jsonl.Log(&msg); err != nil { -- level.Error(lf.logger).Log(""msg"", ""error writing log message"", ""id"", lf.info.ContainerID, ""err"", err, ""message"", msg) -- continue -+ if lf.jsonl != nil { -+ if err := lf.jsonl.Log(&msg); err != nil { -+ level.Error(lf.logger).Log(""msg"", ""error writing log message"", ""id"", lf.info.ContainerID, ""err"", err, ""message"", msg) -+ continue -+ } - } - - buf.Reset() -@@ -156,10 +184,14 @@ func (d *driver) ReadLogs(info logger.Info, config logger.ReadConfig) (io.ReadCl - return nil, fmt.Errorf(""logger does not exist for %s"", info.ContainerID) - } - -+ if lf.jsonl == nil { -+ return nil, fmt.Errorf(""%s option set to true, no reading capability"", cfgNofile) -+ } -+ - r, w := io.Pipe() - lr, ok := lf.jsonl.(logger.LogReader) - if !ok { -- return nil, fmt.Errorf(""logger does not support reading"") -+ return nil, errors.New(""logger does not support reading"") - } - - go func() {",unknown,"Add no-file and keep-file log option for docker driver. (#1906) - -* Add no-file and keep-file option. - -By default no-file is false, this means the driver will create json log files that can be used to run `docker logs` command. -If no-file is true, no files are created and so `docker log` won't work. This allow the ability to run the driver without caring of space left on the host. - -keep-file tells the driver to keep file even after the container has been stopped, by default this options is false. This means files are cleaned up once containers are stopped and not history is left to be used for `docker logs`. - -Signed-off-by: Cyril Tovena - -* Review feedback ! 
- -Signed-off-by: Cyril Tovena " -4c90d57e68a25c0dc0d5372f52a429c7d74d539c,2022-06-27 17:49:59,Mohamed-Amine Bouqsimi,operator: Support TLS enabled lokistack-gateway (Kubernetes native) (#6478),False,"diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md -index e89dc12eaeabe..d81e649334576 100644 ---- a/operator/CHANGELOG.md -+++ b/operator/CHANGELOG.md -@@ -1,5 +1,6 @@ - ## Main - -+- [6411](https://github.com/grafana/loki/pull/6478) **aminesnow**: Support TLS enabled lokistack-gateway for vanilla kubernetes deployments - - [6504](https://github.com/grafana/loki/pull/6504) **periklis**: Disable usage report on OpenShift - - [6411](https://github.com/grafana/loki/pull/6411) **Red-GV**: Extend schema validation in LokiStack webhook - - [6334](https://github.com/grafana/loki/pull/6433) **periklis**: Move operator cli flags to component config -diff --git a/operator/apis/config/v1/projectconfig_types.go b/operator/apis/config/v1/projectconfig_types.go -index eeec86ae48dce..234522509c2b3 100644 ---- a/operator/apis/config/v1/projectconfig_types.go -+++ b/operator/apis/config/v1/projectconfig_types.go -@@ -9,6 +9,7 @@ import ( - type FeatureFlags struct { - EnableCertificateSigningService bool `json:""enableCertSigningService,omitempty""` - EnableServiceMonitors bool `json:""enableServiceMonitors,omitempty""` -+ EnableTLSHTTPServices bool `json:""enableTlsHttpServices,omitempty""` - EnableTLSServiceMonitorConfig bool `json:""enableTlsServiceMonitorConfig,omitempty""` - EnableTLSGRPCServices bool `json:""enableTlsGrpcServices,omitempty""` - EnablePrometheusAlerts bool `json:""enableLokiStackAlerts,omitempty""` -diff --git a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml -index 73421c639953d..97a91f5dbb36f 100644 ---- a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml -+++ b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml -@@ -18,6 +18,7 @@ data: - enableCertSigningService: true - enableServiceMonitors: true - enableTlsServiceMonitorConfig: true -+ enableTlsHttpServices: true - enableTlsGRPCServices: true - enableLokiStackAlerts: true - enableLokiStackGateway: true -diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go -index bfb2546f27585..0e5f6e8e93281 100644 ---- a/operator/cmd/loki-broker/main.go -+++ b/operator/cmd/loki-broker/main.go -@@ -38,6 +38,7 @@ func (c *config) registerFlags(f *flag.FlagSet) { - c.featureFlags = manifests.FeatureFlags{} - f.BoolVar(&c.featureFlags.EnableCertificateSigningService, ""with-cert-signing-service"", false, ""Enable usage of cert-signing service for scraping prometheus metrics via TLS."") - f.BoolVar(&c.featureFlags.EnableServiceMonitors, ""with-service-monitors"", false, ""Enable service monitors for all LokiStack components."") -+ f.BoolVar(&c.featureFlags.EnableTLSHTTPServices, ""with-http-tls-services"", false, ""Enables TLS for lokistack-gateway."") - f.BoolVar(&c.featureFlags.EnableTLSServiceMonitorConfig, ""with-tls-service-monitors"", false, ""Enable TLS endpoint for service monitors."") - f.BoolVar(&c.featureFlags.EnablePrometheusAlerts, ""with-prometheus-alerts"", false, ""Enables prometheus alerts"") - f.BoolVar(&c.featureFlags.EnableGateway, ""with-lokistack-gateway"", false, ""Enables the manifest creation for the entire lokistack-gateway."") -diff --git a/operator/config/crd/bases/config.grafana.com_projectconfigs.yaml 
b/operator/config/crd/bases/config.grafana.com_projectconfigs.yaml -index aa976e9c3e9d3..a25f876fb0e2d 100644 ---- a/operator/config/crd/bases/config.grafana.com_projectconfigs.yaml -+++ b/operator/config/crd/bases/config.grafana.com_projectconfigs.yaml -@@ -78,6 +78,8 @@ spec: - type: boolean - enableTlsServiceMonitorConfig: - type: boolean -+ enableTlsHttpServices: -+ type: boolean - type: object - gracefulShutDown: - description: GracefulShutdownTimeout is the duration given to runnable -diff --git a/operator/config/overlays/openshift/controller_manager_config.yaml b/operator/config/overlays/openshift/controller_manager_config.yaml -index 62f0adc083d8d..b4148d7806515 100644 ---- a/operator/config/overlays/openshift/controller_manager_config.yaml -+++ b/operator/config/overlays/openshift/controller_manager_config.yaml -@@ -15,6 +15,7 @@ featureFlags: - enableCertSigningService: true - enableServiceMonitors: true - enableTlsServiceMonitorConfig: true -+ enableTlsHttpServices: true - enableTlsGRPCServices: true - enableLokiStackAlerts: true - enableLokiStackGateway: true -diff --git a/operator/docs/howto_connect_grafana.md b/operator/docs/howto_connect_grafana.md -index 0564001616ae1..4ad98e35253a7 100644 ---- a/operator/docs/howto_connect_grafana.md -+++ b/operator/docs/howto_connect_grafana.md -@@ -67,7 +67,7 @@ datasources: - httpHeaderValue1: ${LOKI_TENANT_ID} - ``` - --If the operator was started with the `--with-tls-service-monitors` option, then the protocol used to access the service needs to be set to `https` and, depending on the used certificate another option needs to be added to the `jsonData`: `tlsSkipVerify: true` -+If the operator was started with the `--with-http-tls-services` option, then the protocol used to access the service needs to be set to `https` and, depending on the used certificate another option needs to be added to the `jsonData`: `tlsSkipVerify: true` - - The values for the variables used in the configuration file depend on the Lokistack deployment and which Loki tenant needs to be accessed. 
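The datasource note just above says that when the gateway is served over TLS, the Grafana `jsonData` may need `tlsSkipVerify: true` depending on the certificate in use. As a point of reference, that option corresponds to skipping server-certificate verification at the TLS layer. Below is a minimal Go sketch of a client doing the equivalent directly; the gateway URL, request path, and tenant ID are placeholders and not taken from the record, and this is only an illustration of the setting, not how Grafana itself implements it.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Equivalent of Grafana's jsonData `tlsSkipVerify: true`: accept the
	// gateway's certificate without verifying it (only reasonable for
	// self-signed test setups).
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}

	// Placeholder gateway URL and tenant; real values depend on the
	// LokiStack deployment, as the note above says.
	req, err := http.NewRequest("GET", "https://lokistack-gateway.example/loki/api/v1/labels", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Scope-OrgID", "tenant-a")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}
```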
- -diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go -index c8ba075103c55..8b3fa7496cfa4 100644 ---- a/operator/internal/manifests/build_test.go -+++ b/operator/internal/manifests/build_test.go -@@ -219,6 +219,80 @@ func TestBuildAll_WithFeatureFlags_EnableCertificateSigningService(t *testing.T) - } - } - -+func TestBuildAll_WithFeatureFlags_EnableTLSHTTPServices(t *testing.T) { -+ opts := Options{ -+ Name: ""test"", -+ Namespace: ""test"", -+ Stack: lokiv1beta1.LokiStackSpec{ -+ Size: lokiv1beta1.SizeOneXSmall, -+ Rules: &lokiv1beta1.RulesSpec{ -+ Enabled: true, -+ }, -+ }, -+ Flags: FeatureFlags{ -+ EnableTLSHTTPServices: true, -+ }, -+ } -+ -+ err := ApplyDefaultSettings(&opts) -+ require.NoError(t, err) -+ objects, buildErr := BuildAll(opts) -+ require.NoError(t, buildErr) -+ -+ for _, obj := range objects { -+ var ( -+ name string -+ vs []corev1.Volume -+ vms []corev1.VolumeMount -+ args []string -+ rps corev1.URIScheme -+ lps corev1.URIScheme -+ ) -+ -+ switch o := obj.(type) { -+ case *appsv1.Deployment: -+ name = o.Name -+ vs = o.Spec.Template.Spec.Volumes -+ vms = o.Spec.Template.Spec.Containers[0].VolumeMounts -+ args = o.Spec.Template.Spec.Containers[0].Args -+ rps = o.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme -+ lps = o.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme -+ case *appsv1.StatefulSet: -+ name = o.Name -+ vs = o.Spec.Template.Spec.Volumes -+ vms = o.Spec.Template.Spec.Containers[0].VolumeMounts -+ args = o.Spec.Template.Spec.Containers[0].Args -+ rps = o.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme -+ lps = o.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme -+ default: -+ continue -+ } -+ -+ secretName := fmt.Sprintf(""%s-http"", name) -+ expVolume := corev1.Volume{ -+ Name: secretName, -+ VolumeSource: corev1.VolumeSource{ -+ Secret: &corev1.SecretVolumeSource{ -+ SecretName: secretName, -+ }, -+ }, -+ } -+ require.Contains(t, vs, expVolume) -+ -+ expVolumeMount := corev1.VolumeMount{ -+ Name: secretName, -+ ReadOnly: false, -+ MountPath: ""/var/run/tls/http"", -+ } -+ require.Contains(t, vms, expVolumeMount) -+ -+ require.Contains(t, args, ""-server.http-tls-cert-path=/var/run/tls/http/tls.crt"") -+ require.Contains(t, args, ""-server.http-tls-key-path=/var/run/tls/http/tls.key"") -+ require.Equal(t, corev1.URISchemeHTTPS, rps) -+ require.Equal(t, corev1.URISchemeHTTPS, lps) -+ } -+} -+ - func TestBuildAll_WithFeatureFlags_EnableTLSServiceMonitorConfig(t *testing.T) { - opts := Options{ - Name: ""test"", -@@ -231,6 +305,7 @@ func TestBuildAll_WithFeatureFlags_EnableTLSServiceMonitorConfig(t *testing.T) { - }, - Flags: FeatureFlags{ - EnableServiceMonitors: true, -+ EnableTLSHTTPServices: true, - EnableTLSServiceMonitorConfig: true, - }, - } -@@ -480,6 +555,7 @@ func TestBuildAll_WithFeatureFlags_EnableGateway(t *testing.T) { - }, - Flags: FeatureFlags{ - EnableGateway: false, -+ EnableTLSHTTPServices: true, - EnableTLSServiceMonitorConfig: false, - }, - }, -@@ -517,6 +593,7 @@ func TestBuildAll_WithFeatureFlags_EnableGateway(t *testing.T) { - }, - Flags: FeatureFlags{ - EnableGateway: true, -+ EnableTLSHTTPServices: true, - EnableTLSServiceMonitorConfig: true, - }, - }, -diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go -index 52c910d4b351d..25334ec5d6194 100644 ---- a/operator/internal/manifests/compactor.go -+++ b/operator/internal/manifests/compactor.go 
-@@ -20,8 +20,8 @@ import ( - // BuildCompactor builds the k8s objects required to run Loki Compactor. - func BuildCompactor(opts Options) ([]client.Object, error) { - statefulSet := NewCompactorStatefulSet(opts) -- if opts.Flags.EnableTLSServiceMonitorConfig { -- if err := configureCompactorServiceMonitorPKI(statefulSet, opts.Name); err != nil { -+ if opts.Flags.EnableTLSHTTPServices { -+ if err := configureCompactorHTTPServicePKI(statefulSet, opts.Name); err != nil { - return nil, err - } - } -@@ -220,9 +220,9 @@ func NewCompactorHTTPService(opts Options) *corev1.Service { - } - } - --func configureCompactorServiceMonitorPKI(statefulSet *appsv1.StatefulSet, stackName string) error { -+func configureCompactorHTTPServicePKI(statefulSet *appsv1.StatefulSet, stackName string) error { - serviceName := serviceNameCompactorHTTP(stackName) -- return configureServiceMonitorPKI(&statefulSet.Spec.Template.Spec, serviceName) -+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName) - } - - func configureCompactorGRPCServicePKI(sts *appsv1.StatefulSet, stackName string) error { -diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go -index 71674fe54ad6e..0fa7f4c6fdc21 100644 ---- a/operator/internal/manifests/distributor.go -+++ b/operator/internal/manifests/distributor.go -@@ -19,8 +19,8 @@ import ( - // BuildDistributor returns a list of k8s objects for Loki Distributor - func BuildDistributor(opts Options) ([]client.Object, error) { - deployment := NewDistributorDeployment(opts) -- if opts.Flags.EnableTLSServiceMonitorConfig { -- if err := configureDistributorServiceMonitorPKI(deployment, opts.Name); err != nil { -+ if opts.Flags.EnableTLSHTTPServices { -+ if err := configureDistributorHTTPServicePKI(deployment, opts.Name); err != nil { - return nil, err - } - } -@@ -196,9 +196,9 @@ func NewDistributorHTTPService(opts Options) *corev1.Service { - } - } - --func configureDistributorServiceMonitorPKI(deployment *appsv1.Deployment, stackName string) error { -+func configureDistributorHTTPServicePKI(deployment *appsv1.Deployment, stackName string) error { - serviceName := serviceNameDistributorHTTP(stackName) -- return configureServiceMonitorPKI(&deployment.Spec.Template.Spec, serviceName) -+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName) - } - - func configureDistributorGRPCServicePKI(deployment *appsv1.Deployment, stackName, stackNS string) error { -diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go -index c98b7bf398d33..38cc884aa790e 100644 ---- a/operator/internal/manifests/gateway.go -+++ b/operator/internal/manifests/gateway.go -@@ -40,7 +40,7 @@ func BuildGateway(opts Options) ([]client.Object, error) { - - objs := []client.Object{cm, dpl, svc, ing} - -- if opts.Flags.EnableTLSServiceMonitorConfig { -+ if opts.Flags.EnableTLSHTTPServices { - serviceName := serviceNameGatewayHTTP(opts.Name) - if err := configureGatewayMetricsPKI(&dpl.Spec.Template.Spec, serviceName); err != nil { - return nil, err -diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go -index dba5e04ca51c2..a91ed4ef9997c 100644 ---- a/operator/internal/manifests/gateway_tenants.go -+++ b/operator/internal/manifests/gateway_tenants.go -@@ -72,7 +72,7 @@ func configureDeploymentForMode(d *appsv1.Deployment, mode lokiv1beta1.ModeType, - caBundleName, - caBundleDir, - caFile, -- flags.EnableTLSServiceMonitorConfig, -+ 
flags.EnableTLSHTTPServices, - flags.EnableCertificateSigningService, - secretName, - serverName, -diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go -index 64a985a019cb8..37a321fdcc346 100644 ---- a/operator/internal/manifests/gateway_tenants_test.go -+++ b/operator/internal/manifests/gateway_tenants_test.go -@@ -357,6 +357,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { - stackName: ""test"", - stackNs: ""test-ns"", - flags: FeatureFlags{ -+ EnableTLSHTTPServices: true, - EnableTLSServiceMonitorConfig: true, - }, - dpl: &appsv1.Deployment{ -@@ -536,6 +537,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { - stackName: ""test"", - stackNs: ""test-ns"", - flags: FeatureFlags{ -+ EnableTLSHTTPServices: true, - EnableTLSServiceMonitorConfig: true, - EnableCertificateSigningService: true, - }, -@@ -822,6 +824,7 @@ func TestConfigureServiceMonitorForMode(t *testing.T) { - desc: ""openshift-logging mode with-tls-service-monitor-config"", - mode: lokiv1beta1.OpenshiftLogging, - flags: FeatureFlags{ -+ EnableTLSHTTPServices: true, - EnableTLSServiceMonitorConfig: true, - }, - sm: &monitoringv1.ServiceMonitor{ -diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go -index 0394b3723088d..439f90ad44553 100644 ---- a/operator/internal/manifests/indexgateway.go -+++ b/operator/internal/manifests/indexgateway.go -@@ -19,8 +19,8 @@ import ( - // BuildIndexGateway returns a list of k8s objects for Loki IndexGateway - func BuildIndexGateway(opts Options) ([]client.Object, error) { - statefulSet := NewIndexGatewayStatefulSet(opts) -- if opts.Flags.EnableTLSServiceMonitorConfig { -- if err := configureIndexGatewayServiceMonitorPKI(statefulSet, opts.Name); err != nil { -+ if opts.Flags.EnableTLSHTTPServices { -+ if err := configureIndexGatewayHTTPServicePKI(statefulSet, opts.Name); err != nil { - return nil, err - } - } -@@ -220,9 +220,9 @@ func NewIndexGatewayHTTPService(opts Options) *corev1.Service { - } - } - --func configureIndexGatewayServiceMonitorPKI(statefulSet *appsv1.StatefulSet, stackName string) error { -+func configureIndexGatewayHTTPServicePKI(statefulSet *appsv1.StatefulSet, stackName string) error { - serviceName := serviceNameIndexGatewayHTTP(stackName) -- return configureServiceMonitorPKI(&statefulSet.Spec.Template.Spec, serviceName) -+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName) - } - - func configureIndexGatewayGRPCServicePKI(sts *appsv1.StatefulSet, stackName string) error { -diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go -index 2265d4a9b06e3..4394913516afc 100644 ---- a/operator/internal/manifests/ingester.go -+++ b/operator/internal/manifests/ingester.go -@@ -23,8 +23,8 @@ import ( - // BuildIngester builds the k8s objects required to run Loki Ingester - func BuildIngester(opts Options) ([]client.Object, error) { - statefulSet := NewIngesterStatefulSet(opts) -- if opts.Flags.EnableTLSServiceMonitorConfig { -- if err := configureIngesterServiceMonitorPKI(statefulSet, opts.Name); err != nil { -+ if opts.Flags.EnableTLSHTTPServices { -+ if err := configureIngesterHTTPServicePKI(statefulSet, opts.Name); err != nil { - return nil, err - } - } -@@ -252,9 +252,9 @@ func NewIngesterHTTPService(opts Options) *corev1.Service { - } - } - --func configureIngesterServiceMonitorPKI(statefulSet *appsv1.StatefulSet, stackName string) error { -+func configureIngesterHTTPServicePKI(statefulSet 
*appsv1.StatefulSet, stackName string) error { - serviceName := serviceNameIngesterHTTP(stackName) -- return configureServiceMonitorPKI(&statefulSet.Spec.Template.Spec, serviceName) -+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName) - } - - func configureIngesterGRPCServicePKI(sts *appsv1.StatefulSet, stackName, stackNS string) error { -diff --git a/operator/internal/manifests/openshift/configure.go b/operator/internal/manifests/openshift/configure.go -index 42efca26a9af8..9cd489c735c55 100644 ---- a/operator/internal/manifests/openshift/configure.go -+++ b/operator/internal/manifests/openshift/configure.go -@@ -107,7 +107,7 @@ func ConfigureGatewayDeployment( - gwContainer.LivenessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS - gwContainer.Args = gwArgs - -- // Create and mount TLS secrets volumes if it's not already done by the service monitor config. -+ // Create and mount TLS secrets volumes if not already created. - if !withTLS { - gwVolumes = append(gwVolumes, corev1.Volume{ - Name: secretVolumeName, -diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go -index da26c70d043c3..2c29b1b527797 100644 ---- a/operator/internal/manifests/options.go -+++ b/operator/internal/manifests/options.go -@@ -37,6 +37,7 @@ type Options struct { - type FeatureFlags struct { - EnableCertificateSigningService bool - EnableServiceMonitors bool -+ EnableTLSHTTPServices bool - EnableTLSServiceMonitorConfig bool - EnableTLSGRPCServices bool - EnablePrometheusAlerts bool -diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go -index 965e88aba3d1f..d917dadf5c10d 100644 ---- a/operator/internal/manifests/querier.go -+++ b/operator/internal/manifests/querier.go -@@ -21,8 +21,8 @@ import ( - // BuildQuerier returns a list of k8s objects for Loki Querier - func BuildQuerier(opts Options) ([]client.Object, error) { - deployment := NewQuerierDeployment(opts) -- if opts.Flags.EnableTLSServiceMonitorConfig { -- if err := configureQuerierServiceMonitorPKI(deployment, opts.Name); err != nil { -+ if opts.Flags.EnableTLSHTTPServices { -+ if err := configureQuerierHTTPServicePKI(deployment, opts.Name); err != nil { - return nil, err - } - } -@@ -202,9 +202,9 @@ func NewQuerierHTTPService(opts Options) *corev1.Service { - } - } - --func configureQuerierServiceMonitorPKI(deployment *appsv1.Deployment, stackName string) error { -+func configureQuerierHTTPServicePKI(deployment *appsv1.Deployment, stackName string) error { - serviceName := serviceNameQuerierHTTP(stackName) -- return configureServiceMonitorPKI(&deployment.Spec.Template.Spec, serviceName) -+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName) - } - - func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, stackName, stackNS string) error { -diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go -index 7689e2536bf99..53402123f127e 100644 ---- a/operator/internal/manifests/query-frontend.go -+++ b/operator/internal/manifests/query-frontend.go -@@ -17,8 +17,8 @@ import ( - // BuildQueryFrontend returns a list of k8s objects for Loki QueryFrontend - func BuildQueryFrontend(opts Options) ([]client.Object, error) { - deployment := NewQueryFrontendDeployment(opts) -- if opts.Flags.EnableTLSServiceMonitorConfig { -- if err := configureQueryFrontendServiceMonitorPKI(deployment, opts.Name); err != nil { -+ if opts.Flags.EnableTLSHTTPServices { -+ if err := 
configureQueryFrontendHTTPServicePKI(deployment, opts.Name); err != nil { - return nil, err - } - } -@@ -206,9 +206,9 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service { - } - } - --func configureQueryFrontendServiceMonitorPKI(deployment *appsv1.Deployment, stackName string) error { -+func configureQueryFrontendHTTPServicePKI(deployment *appsv1.Deployment, stackName string) error { - serviceName := serviceNameQueryFrontendHTTP(stackName) -- return configureServiceMonitorPKI(&deployment.Spec.Template.Spec, serviceName) -+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName) - } - - func configureQueryFrontendGRPCServicePKI(deployment *appsv1.Deployment, stackName string) error { -diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go -index f0545b205fba4..45001c34c1b20 100644 ---- a/operator/internal/manifests/ruler.go -+++ b/operator/internal/manifests/ruler.go -@@ -20,8 +20,8 @@ import ( - // BuildRuler returns a list of k8s objects for Loki Stack Ruler - func BuildRuler(opts Options) ([]client.Object, error) { - statefulSet := NewRulerStatefulSet(opts) -- if opts.Flags.EnableTLSServiceMonitorConfig { -- if err := configureRulerServiceMonitorPKI(statefulSet, opts.Name); err != nil { -+ if opts.Flags.EnableTLSHTTPServices { -+ if err := configureRulerHTTPServicePKI(statefulSet, opts.Name); err != nil { - return nil, err - } - } -@@ -266,9 +266,9 @@ func NewRulerHTTPService(opts Options) *corev1.Service { - } - } - --func configureRulerServiceMonitorPKI(statefulSet *appsv1.StatefulSet, stackName string) error { -+func configureRulerHTTPServicePKI(statefulSet *appsv1.StatefulSet, stackName string) error { - serviceName := serviceNameRulerHTTP(stackName) -- return configureServiceMonitorPKI(&statefulSet.Spec.Template.Spec, serviceName) -+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName) - } - - func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, stackName string) error { -diff --git a/operator/internal/manifests/service.go b/operator/internal/manifests/service.go -index 28163377232b4..19ce5fea05549 100644 ---- a/operator/internal/manifests/service.go -+++ b/operator/internal/manifests/service.go -@@ -46,3 +46,61 @@ func configureGRPCServicePKI(podSpec *corev1.PodSpec, serviceName string) error - - return nil - } -+ -+func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName string) error { -+ secretVolumeSpec := corev1.PodSpec{ -+ Volumes: []corev1.Volume{ -+ { -+ Name: serviceName, -+ VolumeSource: corev1.VolumeSource{ -+ Secret: &corev1.SecretVolumeSource{ -+ SecretName: serviceName, -+ }, -+ }, -+ }, -+ }, -+ } -+ secretContainerSpec := corev1.Container{ -+ VolumeMounts: []corev1.VolumeMount{ -+ { -+ Name: serviceName, -+ ReadOnly: false, -+ MountPath: httpTLSDir, -+ }, -+ }, -+ Args: []string{ -+ fmt.Sprintf(""-server.http-tls-cert-path=%s"", path.Join(httpTLSDir, tlsCertFile)), -+ fmt.Sprintf(""-server.http-tls-key-path=%s"", path.Join(httpTLSDir, tlsKeyFile)), -+ }, -+ } -+ uriSchemeContainerSpec := corev1.Container{ -+ ReadinessProbe: &corev1.Probe{ -+ ProbeHandler: corev1.ProbeHandler{ -+ HTTPGet: &corev1.HTTPGetAction{ -+ Scheme: corev1.URISchemeHTTPS, -+ }, -+ }, -+ }, -+ LivenessProbe: &corev1.Probe{ -+ ProbeHandler: corev1.ProbeHandler{ -+ HTTPGet: &corev1.HTTPGetAction{ -+ Scheme: corev1.URISchemeHTTPS, -+ }, -+ }, -+ }, -+ } -+ -+ if err := mergo.Merge(podSpec, secretVolumeSpec, mergo.WithAppendSlice); err != nil { -+ return kverrors.Wrap(err, ""failed to merge 
volumes"") -+ } -+ -+ if err := mergo.Merge(&podSpec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil { -+ return kverrors.Wrap(err, ""failed to merge container"") -+ } -+ -+ if err := mergo.Merge(&podSpec.Containers[0], uriSchemeContainerSpec, mergo.WithOverride); err != nil { -+ return kverrors.Wrap(err, ""failed to merge container"") -+ } -+ -+ return nil -+} -diff --git a/operator/internal/manifests/service_monitor.go b/operator/internal/manifests/service_monitor.go -index ed4335dd224c2..07b1d0542b700 100644 ---- a/operator/internal/manifests/service_monitor.go -+++ b/operator/internal/manifests/service_monitor.go -@@ -1,16 +1,10 @@ - package manifests - - import ( -- ""fmt"" -- ""path"" -- -- ""github.com/ViaQ/logerr/v2/kverrors"" -- corev1 ""k8s.io/api/core/v1"" - metav1 ""k8s.io/apimachinery/pkg/apis/meta/v1"" - ""k8s.io/apimachinery/pkg/labels"" - ""sigs.k8s.io/controller-runtime/pkg/client"" - -- ""github.com/imdario/mergo"" - monitoringv1 ""github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"" - ) - -@@ -147,61 +141,3 @@ func newServiceMonitor(namespace, serviceMonitorName string, labels labels.Set, - }, - } - } -- --func configureServiceMonitorPKI(podSpec *corev1.PodSpec, serviceName string) error { -- secretVolumeSpec := corev1.PodSpec{ -- Volumes: []corev1.Volume{ -- { -- Name: serviceName, -- VolumeSource: corev1.VolumeSource{ -- Secret: &corev1.SecretVolumeSource{ -- SecretName: serviceName, -- }, -- }, -- }, -- }, -- } -- secretContainerSpec := corev1.Container{ -- VolumeMounts: []corev1.VolumeMount{ -- { -- Name: serviceName, -- ReadOnly: false, -- MountPath: httpTLSDir, -- }, -- }, -- Args: []string{ -- fmt.Sprintf(""-server.http-tls-cert-path=%s"", path.Join(httpTLSDir, tlsCertFile)), -- fmt.Sprintf(""-server.http-tls-key-path=%s"", path.Join(httpTLSDir, tlsKeyFile)), -- }, -- } -- uriSchemeContainerSpec := corev1.Container{ -- ReadinessProbe: &corev1.Probe{ -- ProbeHandler: corev1.ProbeHandler{ -- HTTPGet: &corev1.HTTPGetAction{ -- Scheme: corev1.URISchemeHTTPS, -- }, -- }, -- }, -- LivenessProbe: &corev1.Probe{ -- ProbeHandler: corev1.ProbeHandler{ -- HTTPGet: &corev1.HTTPGetAction{ -- Scheme: corev1.URISchemeHTTPS, -- }, -- }, -- }, -- } -- -- if err := mergo.Merge(podSpec, secretVolumeSpec, mergo.WithAppendSlice); err != nil { -- return kverrors.Wrap(err, ""failed to merge volumes"") -- } -- -- if err := mergo.Merge(&podSpec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil { -- return kverrors.Wrap(err, ""failed to merge container"") -- } -- -- if err := mergo.Merge(&podSpec.Containers[0], uriSchemeContainerSpec, mergo.WithOverride); err != nil { -- return kverrors.Wrap(err, ""failed to merge container"") -- } -- -- return nil --} -diff --git a/operator/main.go b/operator/main.go -index d59a16feaf26a..0816196c43264 100644 ---- a/operator/main.go -+++ b/operator/main.go -@@ -71,6 +71,11 @@ func main() { - os.Exit(1) - } - -+ if ctrlCfg.Flags.EnableTLSServiceMonitorConfig && !ctrlCfg.Flags.EnableTLSHTTPServices { -+ logger.Error(kverrors.New(""enableTlsServiceMonitorConfig flag requires enableTlsHttpServices""), """") -+ os.Exit(1) -+ } -+ - if ctrlCfg.Flags.EnableServiceMonitors || ctrlCfg.Flags.EnableTLSServiceMonitorConfig { - utilruntime.Must(monitoringv1.AddToScheme(scheme)) - } -@@ -92,6 +97,7 @@ func main() { - featureFlags := manifests.FeatureFlags{ - EnableCertificateSigningService: ctrlCfg.Flags.EnableCertificateSigningService, - EnableServiceMonitors: ctrlCfg.Flags.EnableServiceMonitors, -+ 
EnableTLSHTTPServices: ctrlCfg.Flags.EnableTLSHTTPServices, - EnableTLSServiceMonitorConfig: ctrlCfg.Flags.EnableTLSServiceMonitorConfig, - EnableTLSGRPCServices: ctrlCfg.Flags.EnableTLSGRPCServices, - EnablePrometheusAlerts: ctrlCfg.Flags.EnablePrometheusAlerts,",operator,Support TLS enabled lokistack-gateway (Kubernetes native) (#6478) -2e098c81b81b7635a7060951791a6d5328f16c07,2019-05-09 01:41:08,Edward Welch,"starting to add label extraction, super rough but need to do some work in a different branch",False,"diff --git a/pkg/parser/entry/parsers/regex.go b/pkg/parser/entry/parsers/regex.go -new file mode 100644 -index 0000000000000..7840570f769a4 ---- /dev/null -+++ b/pkg/parser/entry/parsers/regex.go -@@ -0,0 +1,32 @@ -+package parsers -+ -+import ( -+ ""regexp"" -+ ""time"" -+ -+ ""github.com/mitchellh/mapstructure"" -+ ""github.com/prometheus/common/model"" -+ -+ ""github.com/grafana/loki/pkg/parser"" -+) -+ -+type Config struct { -+ Expr string -+ Labels []parser.Label -+} -+ -+type Regex struct { -+ expr *regexp.Regexp -+} -+ -+func NewRegex(config map[interface{}]interface{}) Regex { -+ -+ err := mapstructure.Decode(rg, &cfg2) -+ return Regex{ -+ expr: regexp.MustCompile(config.Expr), -+ } -+} -+ -+func (r *Regex) Parse(labels model.LabelSet, time time.Time, entry string) (time.Time, string, error) { -+ -+} -diff --git a/pkg/parser/entry/processor.go b/pkg/parser/entry/processor.go -new file mode 100644 -index 0000000000000..c5890a9e71a7d ---- /dev/null -+++ b/pkg/parser/entry/processor.go -@@ -0,0 +1,73 @@ -+package entry -+ -+import ( -+ ""time"" -+ -+ ""github.com/mitchellh/mapstructure"" -+ ""github.com/prometheus/common/model"" -+ -+ ""github.com/grafana/loki/pkg/parser/entry/parsers"" -+) -+ -+// Parser takes an existing set of labels, timestamp and log entry and returns either a possibly mutated -+// timestamp and log entry -+type Parser interface { -+ //TODO decide on how to handle labels as a pointer or not -+ Parse(labels model.LabelSet, time time.Time, entry string) (time.Time, string, error) -+} -+ -+type Config struct { -+ //FIXME do we keep the yaml the same? we have to accommodate the kube label parsing happening first (so we can act on those labels) -+ ParserStages []map[interface{}]interface{} `yaml:""parser_stages""` -+} -+ -+type Processor struct { -+ parsers []Parser -+} -+ -+func NewProcessor(config Config) (Processor, error) { -+ -+ rg := config.ParserStages[0][""regex""] -+ -+ //The idea is to load the stages, possibly using reflection to instantiate based on packages? -+ //Then the processor will pass every log line through the pipeline stages -+ //With debug logging to show what the labels are before/after as well as timestamp and log message -+ //Metrics to cover each pipeline stage and the entire process (do we have to pass the metrics in to the processor??) -+ -+ //Error handling, fail on setup errors?, fail on pipeline processing errors? 
-+ -+ //we handle safe casting so we can direct the user to yaml issues if the key isn't a string -+ -+ for _, s := range config.ParserStages { -+ if len(s) > 1 { -+ panic(""Pipeline stages must contain only one key:"") -+ } -+ -+ switch s { -+ case ""regex"": -+ var cfg2 parsers.Config -+ err := mapstructure.Decode(rg, &cfg2) -+ if err != nil { -+ panic(err) -+ } -+ } -+ } -+ return Processor{}, nil -+} -+ -+func (p *Processor) Process(labels *model.LabelSet, time time.Time, entry string) (time.Time, string, error) { -+ t := time -+ e := entry -+ var err error -+ //debug log labels, time, and string -+ for _, parser := range p.parsers { -+ t, e, err = parser.Parse(labels, t, e) -+ if err != nil { -+ //Log error -+ //FIXME how do we proceed? panic?? -+ //if output is defined stages should -+ } -+ //debug log labels, time, and string -+ } -+ return t, e, nil -+} -diff --git a/pkg/parser/entry/processor_test.go b/pkg/parser/entry/processor_test.go -new file mode 100644 -index 0000000000000..7a16f4d676c02 ---- /dev/null -+++ b/pkg/parser/entry/processor_test.go -@@ -0,0 +1,25 @@ -+package entry -+ -+import ( -+ ""testing"" -+ -+ ""gopkg.in/yaml.v2"" -+) -+ -+var testYaml = ` -+parser_stages: -+ - regex: -+ expr: ./* -+ labels: -+ - test: -+ source: somesource -+ -+` -+ -+func TestNewProcessor(t *testing.T) { -+ var config Config -+ err := yaml.Unmarshal([]byte(testYaml), &config) -+ if err != nil { -+ panic(err) -+ } -+} -diff --git a/pkg/parser/model.go b/pkg/parser/model.go -new file mode 100644 -index 0000000000000..98a612a97ef6b ---- /dev/null -+++ b/pkg/parser/model.go -@@ -0,0 +1,6 @@ -+package parser -+ -+type Label struct { -+ LabelName string -+ Source string -+}",unknown,"starting to add label extraction, super rough but need to do some work in a different branch" -ffc87055b8d4bda594bf7c5b077a5fe6bd5eb8c9,2022-02-08 01:11:02,Nate Walters,"Delete docs/sources/copyright directory (#5325) - -Removing copyright notice per https://raintank-corp.slack.com/archives/CH34H23HD/p1643999482151029",False,"diff --git a/docs/sources/copyright/_index.md b/docs/sources/copyright/_index.md -deleted file mode 100644 -index 3bd52d8945ca7..0000000000000 ---- a/docs/sources/copyright/_index.md -+++ /dev/null -@@ -1,8 +0,0 @@ --+++ --title = ""Copyright notice"" --aliases = [""/docs/loki/next/copyright-notice""] --+++ -- --# Copyright notice -- --Copyright © 2021 Raintank, Inc. dba Grafana Labs. All Rights Reserved",unknown,"Delete docs/sources/copyright directory (#5325) - -Removing copyright notice per https://raintank-corp.slack.com/archives/CH34H23HD/p1643999482151029" -3ebab6f3931841f62ac59e6b09afef98db656c71,2024-07-17 02:18:27,Pger-Y,feat(helm): Support alibabacloud oss in helm chart (#13441),False,"diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl -index 58fc5c6cba800..8d4a0a9cb94ef 100644 ---- a/production/helm/loki/templates/_helpers.tpl -+++ b/production/helm/loki/templates/_helpers.tpl -@@ -297,6 +297,14 @@ azure: - endpoint_suffix: {{ . 
}} - {{- end }} - {{- end -}} -+{{- else if eq .Values.loki.storage.type ""alibabacloud"" -}} -+{{- with .Values.loki.storage.alibabacloud }} -+alibabacloud: -+ bucket: {{ $.Values.loki.storage.bucketNames.chunks }} -+ endpoint: {{ .endpoint }} -+ access_key_id: {{ .accessKeyId }} -+ secret_access_key: {{ .secretAccessKey }} -+{{- end -}} - {{- else if eq .Values.loki.storage.type ""swift"" -}} - {{- with .Values.loki.storage.swift }} - swift:",feat,Support alibabacloud oss in helm chart (#13441) -1e2079eb5fdd93b3294c7cacca4b37ec02e47def,2022-11-03 01:10:29,Pablo,Fix Hugo build for docs (#7580),False,"diff --git a/docs/sources/installation/helm/_index.md b/docs/sources/installation/helm/_index.md -index 8e2f8b5156f70..47290cc2a86f9 100644 ---- a/docs/sources/installation/helm/_index.md -+++ b/docs/sources/installation/helm/_index.md -@@ -12,8 +12,6 @@ keywords: - - installation - --- - -- -- - The [Helm](https://helm.sh/) chart allows you to configure, install, and upgrade Grafana Loki within a Kubernetes cluster. - - This guide contains the following sections: -diff --git a/docs/sources/installation/helm/concepts.md b/docs/sources/installation/helm/concepts.md -index aee31832d3de6..548b94d795a11 100644 ---- a/docs/sources/installation/helm/concepts.md -+++ b/docs/sources/installation/helm/concepts.md -@@ -11,8 +11,6 @@ keywords: - - caching - --- - --- -- - # Components - - This section describes the components installed by the Helm Chart.",unknown,Fix Hugo build for docs (#7580) -513cb44b081301c895e9f52b78fe9254ed083a06,2023-09-06 22:25:42,Grot (@grafanabot),"[CI/CD] Update yaml file `./production/helm/loki/Chart.yaml` (+1 other) (#10483) - -**Here is a summary of the updates contained in this PR:** -*** -Update attribute `$.appVersion` in yaml file -`./production/helm/loki/Chart.yaml` to the following value: `2.9.0` -*** -Bump version of Helm Chart -Add changelog entry to `./production/helm/loki/CHANGELOG.md` -Re-generate docs - -Co-authored-by: Kaviraj Kanagaraj ",False,"diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md -index 1bccd960b84bd..15da90a92ed43 100644 ---- a/production/helm/loki/CHANGELOG.md -+++ b/production/helm/loki/CHANGELOG.md -@@ -13,6 +13,11 @@ Entries should include a reference to the pull request that introduced the chang - - [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) 
- -+## 5.18.0 -+ -+- [CHANGE] Changed version of Loki to 2.9.0 -+ -+ - ## 5.17.0 - - - [CHANGE] Changed version of Loki to 2.9.0 -diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index d53ffa28fe0e8..f5cbabf8f4c6b 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -3,7 +3,7 @@ name: loki - description: Helm chart for Grafana Loki in simple, scalable mode - type: application - appVersion: 2.9.0 --version: 5.17.0 -+version: 5.18.0 - home: https://grafana.github.io/helm-charts - sources: - - https://github.com/grafana/loki -diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md -index ddc0f347190e8..11b8d4ec263a2 100644 ---- a/production/helm/loki/README.md -+++ b/production/helm/loki/README.md -@@ -1,6 +1,6 @@ - # loki - --![Version: 5.17.0](https://img.shields.io/badge/Version-5.17.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.0](https://img.shields.io/badge/AppVersion-2.9.0-informational?style=flat-square) -+![Version: 5.18.0](https://img.shields.io/badge/Version-5.18.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.0](https://img.shields.io/badge/AppVersion-2.9.0-informational?style=flat-square) - - Helm chart for Grafana Loki in simple, scalable mode",unknown,"[CI/CD] Update yaml file `./production/helm/loki/Chart.yaml` (+1 other) (#10483) - -**Here is a summary of the updates contained in this PR:** -*** -Update attribute `$.appVersion` in yaml file -`./production/helm/loki/Chart.yaml` to the following value: `2.9.0` -*** -Bump version of Helm Chart -Add changelog entry to `./production/helm/loki/CHANGELOG.md` -Re-generate docs - -Co-authored-by: Kaviraj Kanagaraj " -d9eeed353e91ac573cddc58ebcbf7920f7de57d6,2024-11-06 19:05:27,renovate[bot],"chore(deps): update module github.com/golang-jwt/jwt/v4 to v4.5.1 [security] (#14769) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 6ab7fbbc77807..12e972ab80f27 100644 ---- a/go.mod -+++ b/go.mod -@@ -277,7 +277,7 @@ require ( - github.com/go-playground/validator/v10 v10.19.0 // indirect - github.com/go-zookeeper/zk v1.0.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect -- github.com/golang-jwt/jwt/v4 v4.5.0 // indirect -+ github.com/golang-jwt/jwt/v4 v4.5.1 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/btree v1.1.3 // indirect -diff --git a/go.sum b/go.sum -index f809e8d9a1230..99788a5ba5f8a 100644 ---- a/go.sum -+++ b/go.sum -@@ -1563,8 +1563,9 @@ github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci - github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= - github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= - github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= --github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= - github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= -+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= - github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= - github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= - github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go -index c0a6f6927917a..9dd36e5a5acd4 100644 ---- a/vendor/github.com/golang-jwt/jwt/v4/parser.go -+++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go -@@ -36,19 +36,21 @@ func NewParser(options ...ParserOption) *Parser { - return p - } - --// Parse parses, validates, verifies the signature and returns the parsed token. --// keyFunc will receive the parsed token and should return the key for validating. -+// Parse parses, validates, verifies the signature and returns the parsed token. keyFunc will -+// receive the parsed token and should return the key for validating. - func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) - } - --// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims --// interface. This provides default values which can be overridden and allows a caller to use their own type, rather --// than the default MapClaims implementation of Claims. -+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object -+// implementing the Claims interface. This provides default values which can be overridden and -+// allows a caller to use their own type, rather than the default MapClaims implementation of -+// Claims. - // --// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), --// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the --// proper memory for it before passing in the overall claims, otherwise you might run into a panic. -+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such -+// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or -+// b) if you are using a pointer, allocate the proper memory for it before passing in the overall -+// claims, otherwise you might run into a panic. 
- func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - token, parts, err := p.ParseUnverified(tokenString, claims) - if err != nil { -@@ -85,12 +87,17 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - -+ // Perform validation -+ token.Signature = parts[2] -+ if err := token.Method.Verify(strings.Join(parts[0:2], "".""), token.Signature, key); err != nil { -+ return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid} -+ } -+ - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { -- - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { -@@ -98,22 +105,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf - } else { - vErr = e - } -+ return token, vErr - } - } - -- // Perform validation -- token.Signature = parts[2] -- if err = token.Method.Verify(strings.Join(parts[0:2], "".""), token.Signature, key); err != nil { -- vErr.Inner = err -- vErr.Errors |= ValidationErrorSignatureInvalid -- } -- -- if vErr.valid() { -- token.Valid = true -- return token, nil -- } -+ // No errors so far, token is valid. -+ token.Valid = true - -- return token, vErr -+ return token, nil - } - - // ParseUnverified parses the token but doesn't validate the signature. -diff --git a/vendor/modules.txt b/vendor/modules.txt -index f0b0fc8816b3f..8d3039010988f 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -892,7 +892,7 @@ github.com/gogo/protobuf/types - # github.com/gogo/status v1.1.1 - ## explicit; go 1.12 - github.com/gogo/status --# github.com/golang-jwt/jwt/v4 v4.5.0 -+# github.com/golang-jwt/jwt/v4 v4.5.1 - ## explicit; go 1.16 - github.com/golang-jwt/jwt/v4 - # github.com/golang-jwt/jwt/v5 v5.2.1",chore,"update module github.com/golang-jwt/jwt/v4 to v4.5.1 [security] (#14769) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -c66ffd125cd89f5845a75a1751186fa46d003f70,2023-10-26 14:03:30,renovate[bot],"fix(deps): update github.com/c2h5oh/datasize digest to 859f65c (main) (#10820) - -[![Mend -Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) - -This PR contains the following updates: - -| Package | Type | Update | Change | -|---|---|---|---| -| [github.com/c2h5oh/datasize](https://togithub.com/c2h5oh/datasize) | -require | digest | `28bbd47` -> `859f65c` | - ---- - -### Configuration - -📅 **Schedule**: Branch creation - At any time (no schedule defined), -Automerge - At any time (no schedule defined). - -🚦 **Automerge**: Disabled by config. Please merge this manually once you -are satisfied. - -♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the -rebase/retry checkbox. - -🔕 **Ignore**: Close this PR and you won't be reminded about this update -again. - ---- - -- [ ] If you want to rebase/retry this PR, check -this box - ---- - -This PR has been generated by [Mend -Renovate](https://www.mend.io/free-developer-tools/renovate/). View -repository job log -[here](https://developer.mend.io/github/grafana/loki). 
- - - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index ddf8b367742f5..efad30c6ba83b 100644 ---- a/go.mod -+++ b/go.mod -@@ -20,7 +20,7 @@ require ( - github.com/baidubce/bce-sdk-go v0.9.141 - github.com/bmatcuk/doublestar v1.3.4 - github.com/buger/jsonparser v1.1.1 -- github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee -+ github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b - github.com/cespare/xxhash v1.1.0 - github.com/cespare/xxhash/v2 v2.2.0 - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf -diff --git a/go.sum b/go.sum -index a141a9213dd6a..465a01a6ac13a 100644 ---- a/go.sum -+++ b/go.sum -@@ -387,8 +387,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR - github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= - github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= - github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= --github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I= --github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= -+github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= -+github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= - github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM= - github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= - github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -diff --git a/vendor/github.com/c2h5oh/datasize/README.md b/vendor/github.com/c2h5oh/datasize/README.md -index ac0cf8586e9f2..f6e828587f007 100644 ---- a/vendor/github.com/c2h5oh/datasize/README.md -+++ b/vendor/github.com/c2h5oh/datasize/README.md -@@ -19,7 +19,7 @@ Just like `time` package provides `time.Second`, `time.Day` constants `datasize` - Just like `time` package provides `duration.Nanoseconds() uint64 `, `duration.Hours() float64` helpers `datasize` has. 
- - * `ByteSize.Bytes() uint64` --* `ByteSize.Kilobytes() float4` -+* `ByteSize.Kilobytes() float64` - * `ByteSize.Megabytes() float64` - * `ByteSize.Gigabytes() float64` - * `ByteSize.Terabytes() float64` -diff --git a/vendor/github.com/c2h5oh/datasize/datasize.go b/vendor/github.com/c2h5oh/datasize/datasize.go -index 6754788162496..2ce762751c08f 100644 ---- a/vendor/github.com/c2h5oh/datasize/datasize.go -+++ b/vendor/github.com/c2h5oh/datasize/datasize.go -@@ -215,3 +215,25 @@ BitsError: - *b = 0 - return &strconv.NumError{fnUnmarshalText, string(t0), ErrBits} - } -+ -+func Parse(t []byte) (ByteSize, error) { -+ var v ByteSize -+ err := v.UnmarshalText(t) -+ return v, err -+} -+ -+func MustParse(t []byte) ByteSize { -+ v, err := Parse(t) -+ if err != nil { -+ panic(err) -+ } -+ return v -+} -+ -+func ParseString(s string) (ByteSize, error) { -+ return Parse([]byte(s)) -+} -+ -+func MustParseString(s string) ByteSize { -+ return MustParse([]byte(s)) -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 8e76eb21adbf8..97cf4c4cc6365 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -423,7 +423,7 @@ github.com/bmatcuk/doublestar - # github.com/buger/jsonparser v1.1.1 - ## explicit; go 1.13 - github.com/buger/jsonparser --# github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee -+# github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b - ## explicit - github.com/c2h5oh/datasize - # github.com/census-instrumentation/opencensus-proto v0.4.1",fix,"update github.com/c2h5oh/datasize digest to 859f65c (main) (#10820) - -[![Mend -Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) - -This PR contains the following updates: - -| Package | Type | Update | Change | -|---|---|---|---| -| [github.com/c2h5oh/datasize](https://togithub.com/c2h5oh/datasize) | -require | digest | `28bbd47` -> `859f65c` | - ---- - -### Configuration - -📅 **Schedule**: Branch creation - At any time (no schedule defined), -Automerge - At any time (no schedule defined). - -🚦 **Automerge**: Disabled by config. Please merge this manually once you -are satisfied. - -♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the -rebase/retry checkbox. - -🔕 **Ignore**: Close this PR and you won't be reminded about this update -again. - ---- - -- [ ] If you want to rebase/retry this PR, check -this box - ---- - -This PR has been generated by [Mend -Renovate](https://www.mend.io/free-developer-tools/renovate/). View -repository job log -[here](https://developer.mend.io/github/grafana/loki). 
- - - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -f2a99ce3034c3ff407564010930c25fae8c9780f,2018-05-16 20:24:24,Tom Wilkie,"Update vendor - -Signed-off-by: Tom Wilkie ",False,"diff --git a/Gopkg.lock b/Gopkg.lock -index bbde2cb0d5bfd..d34ba8fbb1310 100644 ---- a/Gopkg.lock -+++ b/Gopkg.lock -@@ -38,18 +38,6 @@ - packages = ["".""] - revision = ""de5bf2ad457846296e2031421a34e2568e304e35"" - --[[projects]] -- name = ""github.com/Shopify/sarama"" -- packages = ["".""] -- revision = ""f7be6aa2bc7b2e38edf816b08b582782194a1c02"" -- version = ""v1.16.0"" -- --[[projects]] -- name = ""github.com/apache/thrift"" -- packages = [""lib/go/thrift""] -- revision = ""327ebb6c2b6df8bf075da02ef45a2a034e9b79ba"" -- version = ""0.11.0"" -- - [[projects]] - name = ""github.com/aws/aws-sdk-go"" - packages = [ -@@ -123,24 +111,6 @@ - revision = ""48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"" - version = ""v2.6.2"" - --[[projects]] -- name = ""github.com/eapache/go-resiliency"" -- packages = [""breaker""] -- revision = ""ea41b0fad31007accc7f806884dcdf3da98b79ce"" -- version = ""v1.1.0"" -- --[[projects]] -- branch = ""master"" -- name = ""github.com/eapache/go-xerial-snappy"" -- packages = ["".""] -- revision = ""bb955e01b9346ac19dc29eb16586c90ded99a98c"" -- --[[projects]] -- name = ""github.com/eapache/queue"" -- packages = ["".""] -- revision = ""44cc805cf13205b55f69e14bcb69867d1ae92f98"" -- version = ""v1.1.0"" -- - [[projects]] - name = ""github.com/emicklei/go-restful"" - packages = [ -@@ -286,7 +256,7 @@ - branch = ""master"" - name = ""github.com/grpc-ecosystem/grpc-opentracing"" - packages = [""go/otgrpc""] -- revision = ""0e7658f8ee99ee5aa683e2a032b8880091b7a055"" -+ revision = ""8e809c8a86450a29b90dcc9efbf062d0fe6d9746"" - - [[projects]] - name = ""github.com/hashicorp/consul"" -@@ -381,12 +351,6 @@ - packages = ["".""] - revision = ""b9ed6aed6cf9de7330c7c8e81d3d8e49086539e8"" - --[[projects]] -- branch = ""master"" -- name = ""github.com/opentracing-contrib/go-observer"" -- packages = ["".""] -- revision = ""a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"" -- - [[projects]] - branch = ""master"" - name = ""github.com/opentracing-contrib/go-stdlib"" -@@ -403,31 +367,6 @@ - revision = ""1949ddbfd147afd4d964a9f00b24eb291e0e7c38"" - version = ""v1.0.2"" - --[[projects]] -- name = ""github.com/openzipkin/zipkin-go-opentracing"" -- packages = [ -- ""."", -- ""flag"", -- ""thrift/gen-go/scribe"", -- ""thrift/gen-go/zipkincore"", -- ""types"", -- ""wire"" -- ] -- revision = ""4c9fbcbd6d73a644fd17214fe475296780c68fb5"" -- version = ""v0.3.3"" -- --[[projects]] -- name = ""github.com/pierrec/lz4"" -- packages = ["".""] -- revision = ""2fcda4cb7018ce05a25959d2fe08c83e3329f169"" -- version = ""v1.1"" -- --[[projects]] -- name = ""github.com/pierrec/xxHash"" -- packages = [""xxHash32""] -- revision = ""f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"" -- version = ""v0.1.1"" -- - [[projects]] - name = ""github.com/pkg/errors"" - packages = ["".""] -@@ -502,12 +441,6 @@ - ] - revision = ""e7584ee34560040c30c373fedba040b079dcac8c"" - --[[projects]] -- branch = ""master"" -- name = ""github.com/rcrowley/go-metrics"" -- packages = ["".""] -- revision = ""d932a24a8ccb8fcadc993e5c6c58f93dac168294"" -- - [[projects]] - branch = ""master"" - name = ""github.com/samuel/go-zookeeper"" -@@ -546,13 +479,7 @@ - revision = ""708a42d246822952f38190a8d8c4e6b16a0e600c"" - - [[projects]] -- branch = ""master"" -- name = ""github.com/weaveworks-experiments/loki"" -- packages = [""pkg/client""] -- revision = 
""80bb2c795b3f88e2d2ce5dad6b26bd790806f2c8"" -- --[[projects]] -- branch = ""master"" -+ branch = ""stream-middleware"" - name = ""github.com/weaveworks/common"" - packages = [ - ""errors"", -@@ -565,7 +492,8 @@ - ""signals"", - ""user"" - ] -- revision = ""ebab3a78900a09cebce8d0b37f2a69d474796bf4"" -+ revision = ""59cfd4d5230f01812158ad90a64257d1c9b43bed"" -+ source = ""github.com/tomwilkie/weaveworks-common"" - - [[projects]] - branch = ""factorings"" -@@ -886,6 +814,6 @@ - [solve-meta] - analyzer-name = ""dep"" - analyzer-version = 1 -- inputs-digest = ""409d8970a073ff200484a0122c71861f933cb397866c4b2e3cc0c88329688ff5"" -+ inputs-digest = ""f2dd40edc3d19bf19d5233c8cf72ca403c7f3c9dbd63c63807ca438885f46c87"" - solver-name = ""gps-cdcl"" - solver-version = 1 -diff --git a/Gopkg.toml b/Gopkg.toml -index 44c210b91bd1d..03ebdf92e8ef1 100644 ---- a/Gopkg.toml -+++ b/Gopkg.toml -@@ -26,8 +26,13 @@ - - [[constraint]] - name = ""github.com/weaveworks/cortex"" -- branch = ""factorings"" - source = ""github.com/grafana/cortex"" -+ branch = ""factorings"" -+ -+[[constraint]] -+ name = ""github.com/weaveworks/common"" -+ source = ""github.com/tomwilkie/weaveworks-common"" -+ branch = ""stream-middleware"" - - [[override]] - name = ""github.com/sercand/kuberesolver"" -@@ -37,6 +42,8 @@ - name = ""gopkg.in/fsnotify.v1"" - source = ""https://github.com/fsnotify/fsnotify.git"" - -+# For Prometheus -+ - [[override]] - name = ""k8s.io/client-go"" - revision = ""3627aeb7d4f6ade38f995d2c923e459146493c7e"" -diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore -deleted file mode 100644 -index c6c482dca8e3f..0000000000000 ---- a/vendor/github.com/Shopify/sarama/.gitignore -+++ /dev/null -@@ -1,26 +0,0 @@ --# Compiled Object files, Static and Dynamic libs (Shared Objects) --*.o --*.a --*.so --*.test -- --# Folders --_obj --_test --.vagrant -- --# Architecture specific extensions/prefixes --*.[568vq] --[568vq].out -- --*.cgo1.go --*.cgo2.c --_cgo_defun.c --_cgo_gotypes.go --_cgo_export.* -- --_testmain.go -- --*.exe -- --coverage.txt -diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml -deleted file mode 100644 -index cc38769fa3403..0000000000000 ---- a/vendor/github.com/Shopify/sarama/.travis.yml -+++ /dev/null -@@ -1,35 +0,0 @@ --language: go --go: --- 1.8.x --- 1.9.x -- --env: -- global: -- - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 -- - TOXIPROXY_ADDR=http://localhost:8474 -- - KAFKA_INSTALL_ROOT=/home/travis/kafka -- - KAFKA_HOSTNAME=localhost -- - DEBUG=true -- matrix: -- - KAFKA_VERSION=0.10.2.1 -- - KAFKA_VERSION=0.11.0.2 -- - KAFKA_VERSION=1.0.0 -- --before_install: --- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} --- vagrant/install_cluster.sh --- vagrant/boot_cluster.sh --- vagrant/create_topics.sh -- --install: make install_dependencies -- --script: --- make test --- make vet --- make errcheck --- make fmt -- --after_success: --- bash <(curl -s https://codecov.io/bash) -- --after_script: vagrant/halt_cluster.sh -diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md -deleted file mode 100644 -index 836841650c3dc..0000000000000 ---- a/vendor/github.com/Shopify/sarama/CHANGELOG.md -+++ /dev/null -@@ -1,503 +0,0 @@ --# Changelog -- --#### Version 1.16.0 (2018-02-12) -- --New Features: -- - Add support for the Create/Delete Topics request/response pairs -- ([#1007](https://github.com/Shopify/sarama/pull/1007), 
-- [#1008](https://github.com/Shopify/sarama/pull/1008)). -- - Add support for the Describe/Create/Delete ACL request/response pairs -- ([#1009](https://github.com/Shopify/sarama/pull/1009)). -- - Add support for the five transaction-related request/response pairs -- ([#1016](https://github.com/Shopify/sarama/pull/1016)). -- --Improvements: -- - Permit setting version on mock producer responses -- ([#999](https://github.com/Shopify/sarama/pull/999)). -- - Add `NewMockBrokerListener` helper for testing TLS connections -- ([#1019](https://github.com/Shopify/sarama/pull/1019)). -- - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB -- which results in much higher throughput in most cases -- ([#1024](https://github.com/Shopify/sarama/pull/1024)). -- - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to -- reduce CPU and memory usage when processing many partitions -- ([#1028](https://github.com/Shopify/sarama/pull/1028)). -- - Assign relative offsets to messages in the producer to save the brokers a -- recompression pass -- ([#1002](https://github.com/Shopify/sarama/pull/1002), -- [#1015](https://github.com/Shopify/sarama/pull/1015)). -- --Bug Fixes: -- - Fix producing uncompressed batches with the new protocol format -- ([#1032](https://github.com/Shopify/sarama/issues/1032)). -- - Fix consuming compacted topics with the new protocol format -- ([#1005](https://github.com/Shopify/sarama/issues/1005)). -- - Fix consuming topics with a mix of protocol formats -- ([#1021](https://github.com/Shopify/sarama/issues/1021)). -- - Fix consuming when the broker includes multiple batches in a single response -- ([#1022](https://github.com/Shopify/sarama/issues/1022)). -- - Fix detection of `PartialTrailingMessage` when the partial message was -- truncated before the magic value indicating its version -- ([#1030](https://github.com/Shopify/sarama/pull/1030)). -- - Fix expectation-checking in the mock of `SyncProducer.SendMessages` -- ([#1035](https://github.com/Shopify/sarama/pull/1035)). -- --#### Version 1.15.0 (2017-12-08) -- --New Features: -- - Claim official support for Kafka 1.0, though it did already work -- ([#984](https://github.com/Shopify/sarama/pull/984)). -- - Helper methods for Kafka version numbers to/from strings -- ([#989](https://github.com/Shopify/sarama/pull/989)). -- - Implement CreatePartitions request/response -- ([#985](https://github.com/Shopify/sarama/pull/985)). -- --Improvements: -- - Add error codes 45-60 -- ([#986](https://github.com/Shopify/sarama/issues/986)). -- --Bug Fixes: -- - Fix slow consuming for certain Kafka 0.11/1.0 configurations -- ([#982](https://github.com/Shopify/sarama/pull/982)). -- - Correctly determine when a FetchResponse contains the new message format -- ([#990](https://github.com/Shopify/sarama/pull/990)). -- - Fix producing with multiple headers -- ([#996](https://github.com/Shopify/sarama/pull/996)). -- - Fix handling of truncated record batches -- ([#998](https://github.com/Shopify/sarama/pull/998)). -- - Fix leaking metrics when closing brokers -- ([#991](https://github.com/Shopify/sarama/pull/991)). -- --#### Version 1.14.0 (2017-11-13) -- --New Features: -- - Add support for the new Kafka 0.11 record-batch format, including the wire -- protocol and the necessary behavioural changes in the producer and consumer. 
-- Transactions and idempotency are not yet supported, but producing and -- consuming should work with all the existing bells and whistles (batching, -- compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta -- of Arista Networks for this work. Part of -- ([#901](https://github.com/Shopify/sarama/issues/901)). -- --Bug Fixes: -- - Fix encoding of ProduceResponse versions in test -- ([#970](https://github.com/Shopify/sarama/pull/970)). -- - Return partial replicas list when we have it -- ([#975](https://github.com/Shopify/sarama/pull/975)). -- --#### Version 1.13.0 (2017-10-04) -- --New Features: -- - Support for FetchRequest version 3 -- ([#905](https://github.com/Shopify/sarama/pull/905)). -- - Permit setting version on mock FetchResponses -- ([#939](https://github.com/Shopify/sarama/pull/939)). -- - Add a configuration option to support storing only minimal metadata for -- extremely large clusters -- ([#937](https://github.com/Shopify/sarama/pull/937)). -- - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets -- ([#932](https://github.com/Shopify/sarama/pull/932)). -- --Improvements: -- - Provide the block-level timestamp when consuming compressed messages -- ([#885](https://github.com/Shopify/sarama/issues/885)). -- - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned -- by the broker, which can be meaningful -- ([#930](https://github.com/Shopify/sarama/pull/930)). -- - Use a `Ticker` to reduce consumer timer overhead at the cost of higher -- variance in the actual timeout -- ([#933](https://github.com/Shopify/sarama/pull/933)). -- --Bug Fixes: -- - Gracefully handle messages with negative timestamps -- ([#907](https://github.com/Shopify/sarama/pull/907)). -- - Raise a proper error when encountering an unknown message version -- ([#940](https://github.com/Shopify/sarama/pull/940)). -- --#### Version 1.12.0 (2017-05-08) -- --New Features: -- - Added support for the `ApiVersions` request and response pair, and Kafka -- version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note -- that you still need to specify the Kafka version in the Sarama configuration -- for the time being. -- - Added a `Brokers` method to the Client which returns the complete set of -- active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). -- - Added an `InSyncReplicas` method to the Client which returns the set of all -- in-sync broker IDs for the given partition, now that the Kafka versions for -- which this was misleading are no longer in our supported set -- ([#872](https://github.com/Shopify/sarama/pull/872)). -- - Added a `NewCustomHashPartitioner` method which allows constructing a hash -- partitioner with a custom hash method in case the default (FNV-1a) is not -- suitable -- ([#837](https://github.com/Shopify/sarama/pull/837), -- [#841](https://github.com/Shopify/sarama/pull/841)). -- --Improvements: -- - Recognize more Kafka error codes -- ([#859](https://github.com/Shopify/sarama/pull/859)). -- --Bug Fixes: -- - Fix an issue where decoding a malformed FetchRequest would not return the -- correct error ([#818](https://github.com/Shopify/sarama/pull/818)). -- - Respect ordering of group protocols in JoinGroupRequests. This fix is -- transparent if you're using the `AddGroupProtocol` or -- `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from -- the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` -- ([#812](https://github.com/Shopify/sarama/issues/812)). 
-- - Fix an alignment-related issue with atomics on 32-bit architectures -- ([#859](https://github.com/Shopify/sarama/pull/859)). -- --#### Version 1.11.0 (2016-12-20) -- --_Important:_ As of Sarama 1.11 it is necessary to set the config value of --`Producer.Return.Successes` to true in order to use the SyncProducer. Previous --versions would silently override this value when instantiating a SyncProducer --which led to unexpected values and data races. -- --New Features: -- - Metrics! Thanks to Sébastien Launay for all his work on this feature -- ([#701](https://github.com/Shopify/sarama/pull/701), -- [#746](https://github.com/Shopify/sarama/pull/746), -- [#766](https://github.com/Shopify/sarama/pull/766)). -- - Add support for LZ4 compression -- ([#786](https://github.com/Shopify/sarama/pull/786)). -- - Add support for ListOffsetRequest v1 and Kafka 0.10.1 -- ([#775](https://github.com/Shopify/sarama/pull/775)). -- - Added a `HighWaterMarks` method to the Consumer which aggregates the -- `HighWaterMarkOffset` values of its child topic/partitions -- ([#769](https://github.com/Shopify/sarama/pull/769)). -- --Bug Fixes: -- - Fixed producing when using timestamps, compression and Kafka 0.10 -- ([#759](https://github.com/Shopify/sarama/pull/759)). -- - Added missing decoder methods to DescribeGroups response -- ([#756](https://github.com/Shopify/sarama/pull/756)). -- - Fix producer shutdown when `Return.Errors` is disabled -- ([#787](https://github.com/Shopify/sarama/pull/787)). -- - Don't mutate configuration in SyncProducer -- ([#790](https://github.com/Shopify/sarama/pull/790)). -- - Fix crash on SASL initialization failure -- ([#795](https://github.com/Shopify/sarama/pull/795)). -- --#### Version 1.10.1 (2016-08-30) -- --Bug Fixes: -- - Fix the documentation for `HashPartitioner` which was incorrect -- ([#717](https://github.com/Shopify/sarama/pull/717)). -- - Permit client creation even when it is limited by ACLs -- ([#722](https://github.com/Shopify/sarama/pull/722)). -- - Several fixes to the consumer timer optimization code, regressions introduced -- in v1.10.0. Go's timers are finicky -- ([#730](https://github.com/Shopify/sarama/pull/730), -- [#733](https://github.com/Shopify/sarama/pull/733), -- [#734](https://github.com/Shopify/sarama/pull/734)). -- - Handle consuming compressed relative offsets with Kafka 0.10 -- ([#735](https://github.com/Shopify/sarama/pull/735)). -- --#### Version 1.10.0 (2016-08-02) -- --_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of --Kafka you are running against (via the `config.Version` value) in order to use --features that may not be compatible with old Kafka versions. If you don't --specify this value it will default to 0.8.2 (the minimum supported), and trying --to use more recent features (like the offset manager) will fail with an error. -- --_Also:_ The offset-manager's behaviour has been changed to match the upstream --java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and --[#713](https://github.com/Shopify/sarama/pull/713)). If you use the --offset-manager, please ensure that you are committing one *greater* than the --last consumed message offset or else you may end up consuming duplicate --messages. -- --New Features: -- - Support for Kafka 0.10 -- ([#672](https://github.com/Shopify/sarama/pull/672), -- [#678](https://github.com/Shopify/sarama/pull/678), -- [#681](https://github.com/Shopify/sarama/pull/681), and others). 
-- - Support for configuring the target Kafka version -- ([#676](https://github.com/Shopify/sarama/pull/676)). -- - Batch producing support in the SyncProducer -- ([#677](https://github.com/Shopify/sarama/pull/677)). -- - Extend producer mock to allow setting expectations on message contents -- ([#667](https://github.com/Shopify/sarama/pull/667)). -- --Improvements: -- - Support `nil` compressed messages for deleting in compacted topics -- ([#634](https://github.com/Shopify/sarama/pull/634)). -- - Pre-allocate decoding errors, greatly reducing heap usage and GC time against -- misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). -- - Re-use consumer expiry timers, removing one allocation per consumed message -- ([#707](https://github.com/Shopify/sarama/pull/707)). -- --Bug Fixes: -- - Actually default the client ID to ""sarama"" like we say we do -- ([#664](https://github.com/Shopify/sarama/pull/664)). -- - Fix a rare issue where `Client.Leader` could return the wrong error -- ([#685](https://github.com/Shopify/sarama/pull/685)). -- - Fix a possible tight loop in the consumer -- ([#693](https://github.com/Shopify/sarama/pull/693)). -- - Match upstream's offset-tracking behaviour -- ([#705](https://github.com/Shopify/sarama/pull/705)). -- - Report UnknownTopicOrPartition errors from the offset manager -- ([#706](https://github.com/Shopify/sarama/pull/706)). -- - Fix possible negative partition value from the HashPartitioner -- ([#709](https://github.com/Shopify/sarama/pull/709)). -- --#### Version 1.9.0 (2016-05-16) -- --New Features: -- - Add support for custom offset manager retention durations -- ([#602](https://github.com/Shopify/sarama/pull/602)). -- - Publish low-level mocks to enable testing of third-party producer/consumer -- implementations ([#570](https://github.com/Shopify/sarama/pull/570)). -- - Declare support for Golang 1.6 -- ([#611](https://github.com/Shopify/sarama/pull/611)). -- - Support for SASL plain-text auth -- ([#648](https://github.com/Shopify/sarama/pull/648)). -- --Improvements: -- - Simplified broker locking scheme slightly -- ([#604](https://github.com/Shopify/sarama/pull/604)). -- - Documentation cleanup -- ([#605](https://github.com/Shopify/sarama/pull/605), -- [#621](https://github.com/Shopify/sarama/pull/621), -- [#654](https://github.com/Shopify/sarama/pull/654)). -- --Bug Fixes: -- - Fix race condition shutting down the OffsetManager -- ([#658](https://github.com/Shopify/sarama/pull/658)). -- --#### Version 1.8.0 (2016-02-01) -- --New Features: -- - Full support for Kafka 0.9: -- - All protocol messages and fields -- ([#586](https://github.com/Shopify/sarama/pull/586), -- [#588](https://github.com/Shopify/sarama/pull/588), -- [#590](https://github.com/Shopify/sarama/pull/590)). -- - Verified that TLS support works -- ([#581](https://github.com/Shopify/sarama/pull/581)). -- - Fixed the OffsetManager compatibility -- ([#585](https://github.com/Shopify/sarama/pull/585)). -- --Improvements: -- - Optimize for fewer system calls when reading from the network -- ([#584](https://github.com/Shopify/sarama/pull/584)). -- - Automatically retry `InvalidMessage` errors to match upstream behaviour -- ([#589](https://github.com/Shopify/sarama/pull/589)). -- --#### Version 1.7.0 (2015-12-11) -- --New Features: -- - Preliminary support for Kafka 0.9 -- ([#572](https://github.com/Shopify/sarama/pull/572)). 
This comes with several -- caveats: -- - Protocol-layer support is mostly in place -- ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 -- renamed some messages and fields, which we did not in order to preserve API -- compatibility. -- - The producer and consumer work against 0.9, but the offset manager does -- not ([#573](https://github.com/Shopify/sarama/pull/573)). -- - TLS support may or may not work -- ([#581](https://github.com/Shopify/sarama/pull/581)). -- --Improvements: -- - Don't wait for request timeouts on dead brokers, greatly speeding recovery -- when the TCP connection is left hanging -- ([#548](https://github.com/Shopify/sarama/pull/548)). -- - Refactored part of the producer. The new version provides a much more elegant -- solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also -- slightly more efficient, and much more precise in calculating batch sizes -- when compression is used -- ([#549](https://github.com/Shopify/sarama/pull/549), -- [#550](https://github.com/Shopify/sarama/pull/550), -- [#551](https://github.com/Shopify/sarama/pull/551)). -- --Bug Fixes: -- - Fix race condition in consumer test mock -- ([#553](https://github.com/Shopify/sarama/pull/553)). -- --#### Version 1.6.1 (2015-09-25) -- --Bug Fixes: -- - Fix panic that could occur if a user-supplied message value failed to encode -- ([#449](https://github.com/Shopify/sarama/pull/449)). -- --#### Version 1.6.0 (2015-09-04) -- --New Features: -- - Implementation of a consumer offset manager using the APIs introduced in -- Kafka 0.8.2. The API is designed mainly for integration into a future -- high-level consumer, not for direct use, although it is *possible* to use it -- directly. -- ([#461](https://github.com/Shopify/sarama/pull/461)). -- --Improvements: -- - CRC32 calculation is much faster on machines with SSE4.2 instructions, -- removing a major hotspot from most profiles -- ([#255](https://github.com/Shopify/sarama/pull/255)). -- --Bug Fixes: -- - Make protocol decoding more robust against some malformed packets generated -- by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), -- [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways -- ([#528](https://github.com/Shopify/sarama/pull/528)). -- - Fix a potential race condition panic in the consumer on shutdown -- ([#529](https://github.com/Shopify/sarama/pull/529)). -- --#### Version 1.5.0 (2015-08-17) -- --New Features: -- - TLS-encrypted network connections are now supported. This feature is subject -- to change when Kafka releases built-in TLS support, but for now this is -- enough to work with TLS-terminating proxies -- ([#154](https://github.com/Shopify/sarama/pull/154)). -- --Improvements: -- - The consumer will not block if a single partition is not drained by the user; -- all other partitions will continue to consume normally -- ([#485](https://github.com/Shopify/sarama/pull/485)). -- - Formatting of error strings has been much improved -- ([#495](https://github.com/Shopify/sarama/pull/495)). -- - Internal refactoring of the producer for code cleanliness and to enable -- future work ([#300](https://github.com/Shopify/sarama/pull/300)). -- --Bug Fixes: -- - Fix a potential deadlock in the consumer on shutdown -- ([#475](https://github.com/Shopify/sarama/pull/475)). -- --#### Version 1.4.3 (2015-07-21) -- --Bug Fixes: -- - Don't include the partitioner in the producer's ""fetch partitions"" -- circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). 
-- - Don't retry messages until the broker is closed when abandoning a broker in -- the producer ([#468](https://github.com/Shopify/sarama/pull/468)). -- - Update the import path for snappy-go, it has moved again and the API has -- changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). -- --#### Version 1.4.2 (2015-05-27) -- --Bug Fixes: -- - Update the import path for snappy-go, it has moved from google code to github -- ([#456](https://github.com/Shopify/sarama/pull/456)). -- --#### Version 1.4.1 (2015-05-25) -- --Improvements: -- - Optimizations when decoding snappy messages, thanks to John Potocny -- ([#446](https://github.com/Shopify/sarama/pull/446)). -- --Bug Fixes: -- - Fix hypothetical race conditions on producer shutdown -- ([#450](https://github.com/Shopify/sarama/pull/450), -- [#451](https://github.com/Shopify/sarama/pull/451)). -- --#### Version 1.4.0 (2015-05-01) -- --New Features: -- - The consumer now implements `Topics()` and `Partitions()` methods to enable -- users to dynamically choose what topics/partitions to consume without -- instantiating a full client -- ([#431](https://github.com/Shopify/sarama/pull/431)). -- - The partition-consumer now exposes the high water mark offset value returned -- by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). -- - Added a `kafka-console-consumer` tool capable of handling multiple -- partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` -- ([#439](https://github.com/Shopify/sarama/pull/439), -- [#442](https://github.com/Shopify/sarama/pull/442)). -- --Improvements: -- - The producer's logging during retry scenarios is more consistent, more -- useful, and slightly less verbose -- ([#429](https://github.com/Shopify/sarama/pull/429)). -- - The client now shuffles its initial list of seed brokers in order to prevent -- thundering herd on the first broker in the list -- ([#441](https://github.com/Shopify/sarama/pull/441)). -- --Bug Fixes: -- - The producer now correctly manages its state if retries occur when it is -- shutting down, fixing several instances of confusing behaviour and at least -- one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). -- - The consumer now handles messages for different partitions asynchronously, -- making it much more resilient to specific user code ordering -- ([#325](https://github.com/Shopify/sarama/pull/325)). -- --#### Version 1.3.0 (2015-04-16) -- --New Features: -- - The client now tracks consumer group coordinators using -- ConsumerMetadataRequests similar to how it tracks partition leadership using -- regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). -- This adds two methods to the client API: -- - `Coordinator(consumerGroup string) (*Broker, error)` -- - `RefreshCoordinator(consumerGroup string) error` -- --Improvements: -- - ConsumerMetadataResponses now automatically create a Broker object out of the -- ID/address/port combination for the Coordinator; accessing the fields -- individually has been deprecated -- ([#413](https://github.com/Shopify/sarama/pull/413)). -- - Much improved handling of `OffsetOutOfRange` errors in the consumer. -- Consumers will fail to start if the provided offset is out of range -- ([#418](https://github.com/Shopify/sarama/pull/418)) -- and they will automatically shut down if the offset falls out of range -- ([#424](https://github.com/Shopify/sarama/pull/424)). 
-- - Small performance improvement in encoding and decoding protocol messages -- ([#427](https://github.com/Shopify/sarama/pull/427)). -- --Bug Fixes: -- - Fix a rare race condition in the client's background metadata refresher if -- it happens to be activated while the client is being closed -- ([#422](https://github.com/Shopify/sarama/pull/422)). -- --#### Version 1.2.0 (2015-04-07) -- --Improvements: -- - The producer's behaviour when `Flush.Frequency` is set is now more intuitive -- ([#389](https://github.com/Shopify/sarama/pull/389)). -- - The producer is now somewhat more memory-efficient during and after retrying -- messages due to an improved queue implementation -- ([#396](https://github.com/Shopify/sarama/pull/396)). -- - The consumer produces much more useful logging output when leadership -- changes ([#385](https://github.com/Shopify/sarama/pull/385)). -- - The client's `GetOffset` method will now automatically refresh metadata and -- retry once in the event of stale information or similar -- ([#394](https://github.com/Shopify/sarama/pull/394)). -- - Broker connections now have support for using TCP keepalives -- ([#407](https://github.com/Shopify/sarama/issues/407)). -- --Bug Fixes: -- - The OffsetCommitRequest message now correctly implements all three possible -- API versions ([#390](https://github.com/Shopify/sarama/pull/390), -- [#400](https://github.com/Shopify/sarama/pull/400)). -- --#### Version 1.1.0 (2015-03-20) -- --Improvements: -- - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly -- broken topics don't choke throughput -- ([#373](https://github.com/Shopify/sarama/pull/373)). -- --Bug Fixes: -- - Fix the producer's internal reference counting in certain unusual scenarios -- ([#367](https://github.com/Shopify/sarama/pull/367)). -- - Fix the consumer's internal reference counting in certain unusual scenarios -- ([#369](https://github.com/Shopify/sarama/pull/369)). -- - Fix a condition where the producer's internal control messages could have -- gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). -- - Fix an issue where invalid partition lists would be cached when asking for -- metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). -- -- --#### Version 1.0.0 (2015-03-17) -- --Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: -- --- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. --- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. --- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. --- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. --- All the configuration values have been unified in the `Config` struct. --- Much improved test suite. 
-diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE -deleted file mode 100644 -index 8121b63b1c4ac..0000000000000 ---- a/vendor/github.com/Shopify/sarama/LICENSE -+++ /dev/null -@@ -1,20 +0,0 @@ --Copyright (c) 2013 Evan Huus -- --Permission is hereby granted, free of charge, to any person obtaining --a copy of this software and associated documentation files (the --""Software""), to deal in the Software without restriction, including --without limitation the rights to use, copy, modify, merge, publish, --distribute, sublicense, and/or sell copies of the Software, and to --permit persons to whom the Software is furnished to do so, subject to --the following conditions: -- --The above copyright notice and this permission notice shall be --included in all copies or substantial portions of the Software. -- --THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, --EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF --MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND --NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE --LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION --OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION --WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile -deleted file mode 100644 -index 58a39e4f34ddb..0000000000000 ---- a/vendor/github.com/Shopify/sarama/Makefile -+++ /dev/null -@@ -1,29 +0,0 @@ --default: fmt vet errcheck test -- --# Taken from https://github.com/codecov/example-go#caveat-multiple-files --test: -- echo """" > coverage.txt -- for d in `go list ./... | grep -v vendor`; do \ -- go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \ -- if [ -f profile.out ]; then \ -- cat profile.out >> coverage.txt; \ -- rm profile.out; \ -- fi \ -- done -- --vet: -- go vet ./... -- --errcheck: -- errcheck github.com/Shopify/sarama/... -- --fmt: -- @if [ -n ""$$(go fmt ./...)"" ]; then echo 'Please run go fmt on your code.' && exit 1; fi -- --install_dependencies: install_errcheck get -- --install_errcheck: -- go get github.com/kisielk/errcheck -- --get: -- go get -t -diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md -deleted file mode 100644 -index 28431f13eb8fc..0000000000000 ---- a/vendor/github.com/Shopify/sarama/README.md -+++ /dev/null -@@ -1,39 +0,0 @@ --sarama --====== -- --[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) --[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) --[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) -- --Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). -- --### Getting started -- --- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). --- Mocks for testing are available in the [mocks](./mocks) subpackage. --- The [examples](./examples) directory contains more elaborate example applications. --- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. 
-- --You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). -- --### Compatibility and API stability -- --Sarama provides a ""2 releases + 2 months"" compatibility guarantee: we support --the two latest stable releases of Kafka and Go, and we provide a two month --grace period for older releases. This means we currently officially support --Go 1.9 and 1.8, and Kafka 1.0 through 0.10, although older releases are --still likely to work. -- --Sarama follows semantic versioning and provides API stability via the gopkg.in service. --You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. --A changelog is available [here](CHANGELOG.md). -- --### Contributing -- --* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). --* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more -- technical and design details. --* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) -- contains a wealth of useful information. --* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. --* If you have any questions, just ask! -diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile -deleted file mode 100644 -index f4b848a301bb7..0000000000000 ---- a/vendor/github.com/Shopify/sarama/Vagrantfile -+++ /dev/null -@@ -1,20 +0,0 @@ --# -*- mode: ruby -*- --# vi: set ft=ruby : -- --# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! --VAGRANTFILE_API_VERSION = ""2"" -- --# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB --MEMORY = 3072 -- --Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| -- config.vm.box = ""ubuntu/trusty64"" -- -- config.vm.provision :shell, path: ""vagrant/provision.sh"" -- -- config.vm.network ""private_network"", ip: ""192.168.100.67"" -- -- config.vm.provider ""virtualbox"" do |v| -- v.memory = MEMORY -- end --end -diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go -deleted file mode 100644 -index 51517359abcc0..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_bindings.go -+++ /dev/null -@@ -1,119 +0,0 @@ --package sarama -- --type Resource struct { -- ResourceType AclResourceType -- ResourceName string --} -- --func (r *Resource) encode(pe packetEncoder) error { -- pe.putInt8(int8(r.ResourceType)) -- -- if err := pe.putString(r.ResourceName); err != nil { -- return err -- } -- -- return nil --} -- --func (r *Resource) decode(pd packetDecoder, version int16) (err error) { -- resourceType, err := pd.getInt8() -- if err != nil { -- return err -- } -- r.ResourceType = AclResourceType(resourceType) -- -- if r.ResourceName, err = pd.getString(); err != nil { -- return err -- } -- -- return nil --} -- --type Acl struct { -- Principal string -- Host string -- Operation AclOperation -- PermissionType AclPermissionType --} -- --func (a *Acl) encode(pe packetEncoder) error { -- if err := pe.putString(a.Principal); err != nil { -- return err -- } -- -- if err := pe.putString(a.Host); err != nil { -- return err -- } -- -- pe.putInt8(int8(a.Operation)) -- pe.putInt8(int8(a.PermissionType)) -- -- return nil --} -- --func (a *Acl) decode(pd packetDecoder, version int16) (err error) { -- if 
a.Principal, err = pd.getString(); err != nil { -- return err -- } -- -- if a.Host, err = pd.getString(); err != nil { -- return err -- } -- -- operation, err := pd.getInt8() -- if err != nil { -- return err -- } -- a.Operation = AclOperation(operation) -- -- permissionType, err := pd.getInt8() -- if err != nil { -- return err -- } -- a.PermissionType = AclPermissionType(permissionType) -- -- return nil --} -- --type ResourceAcls struct { -- Resource -- Acls []*Acl --} -- --func (r *ResourceAcls) encode(pe packetEncoder) error { -- if err := r.Resource.encode(pe); err != nil { -- return err -- } -- -- if err := pe.putArrayLength(len(r.Acls)); err != nil { -- return err -- } -- for _, acl := range r.Acls { -- if err := acl.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *ResourceAcls) decode(pd packetDecoder, version int16) error { -- if err := r.Resource.decode(pd, version); err != nil { -- return err -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Acls = make([]*Acl, n) -- for i := 0; i < n; i++ { -- r.Acls[i] = new(Acl) -- if err := r.Acls[i].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go -deleted file mode 100644 -index 0b6ecbec3e1b0..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_create_request.go -+++ /dev/null -@@ -1,76 +0,0 @@ --package sarama -- --type CreateAclsRequest struct { -- AclCreations []*AclCreation --} -- --func (c *CreateAclsRequest) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(c.AclCreations)); err != nil { -- return err -- } -- -- for _, aclCreation := range c.AclCreations { -- if err := aclCreation.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- c.AclCreations = make([]*AclCreation, n) -- -- for i := 0; i < n; i++ { -- c.AclCreations[i] = new(AclCreation) -- if err := c.AclCreations[i].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (d *CreateAclsRequest) key() int16 { -- return 30 --} -- --func (d *CreateAclsRequest) version() int16 { -- return 0 --} -- --func (d *CreateAclsRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -- --type AclCreation struct { -- Resource -- Acl --} -- --func (a *AclCreation) encode(pe packetEncoder) error { -- if err := a.Resource.encode(pe); err != nil { -- return err -- } -- if err := a.Acl.encode(pe); err != nil { -- return err -- } -- -- return nil --} -- --func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) { -- if err := a.Resource.decode(pd, version); err != nil { -- return err -- } -- if err := a.Acl.decode(pd, version); err != nil { -- return err -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go -deleted file mode 100644 -index 8a56f357354de..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_create_response.go -+++ /dev/null -@@ -1,88 +0,0 @@ --package sarama -- --import ""time"" -- --type CreateAclsResponse struct { -- ThrottleTime time.Duration -- AclCreationResponses []*AclCreationResponse --} -- --func (c *CreateAclsResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(c.ThrottleTime / 
time.Millisecond)) -- -- if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil { -- return err -- } -- -- for _, aclCreationResponse := range c.AclCreationResponses { -- if err := aclCreationResponse.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- c.AclCreationResponses = make([]*AclCreationResponse, n) -- for i := 0; i < n; i++ { -- c.AclCreationResponses[i] = new(AclCreationResponse) -- if err := c.AclCreationResponses[i].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (d *CreateAclsResponse) key() int16 { -- return 30 --} -- --func (d *CreateAclsResponse) version() int16 { -- return 0 --} -- --func (d *CreateAclsResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -- --type AclCreationResponse struct { -- Err KError -- ErrMsg *string --} -- --func (a *AclCreationResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(a.Err)) -- -- if err := pe.putNullableString(a.ErrMsg); err != nil { -- return err -- } -- -- return nil --} -- --func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- a.Err = KError(kerr) -- -- if a.ErrMsg, err = pd.getNullableString(); err != nil { -- return err -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go -deleted file mode 100644 -index 4133dceab71de..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_delete_request.go -+++ /dev/null -@@ -1,48 +0,0 @@ --package sarama -- --type DeleteAclsRequest struct { -- Filters []*AclFilter --} -- --func (d *DeleteAclsRequest) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(d.Filters)); err != nil { -- return err -- } -- -- for _, filter := range d.Filters { -- if err := filter.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- d.Filters = make([]*AclFilter, n) -- for i := 0; i < n; i++ { -- d.Filters[i] = new(AclFilter) -- if err := d.Filters[i].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (d *DeleteAclsRequest) key() int16 { -- return 31 --} -- --func (d *DeleteAclsRequest) version() int16 { -- return 0 --} -- --func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go -deleted file mode 100644 -index b5e1c45eb5d38..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_delete_response.go -+++ /dev/null -@@ -1,155 +0,0 @@ --package sarama -- --import ""time"" -- --type DeleteAclsResponse struct { -- ThrottleTime time.Duration -- FilterResponses []*FilterResponse --} -- --func (a *DeleteAclsResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) -- -- if err := pe.putArrayLength(len(a.FilterResponses)); err != nil { -- return err -- } -- -- for _, filterResponse := range 
a.FilterResponses { -- if err := filterResponse.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- a.FilterResponses = make([]*FilterResponse, n) -- -- for i := 0; i < n; i++ { -- a.FilterResponses[i] = new(FilterResponse) -- if err := a.FilterResponses[i].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (d *DeleteAclsResponse) key() int16 { -- return 31 --} -- --func (d *DeleteAclsResponse) version() int16 { -- return 0 --} -- --func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -- --type FilterResponse struct { -- Err KError -- ErrMsg *string -- MatchingAcls []*MatchingAcl --} -- --func (f *FilterResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(f.Err)) -- if err := pe.putNullableString(f.ErrMsg); err != nil { -- return err -- } -- -- if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil { -- return err -- } -- for _, matchingAcl := range f.MatchingAcls { -- if err := matchingAcl.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- f.Err = KError(kerr) -- -- if f.ErrMsg, err = pd.getNullableString(); err != nil { -- return err -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- f.MatchingAcls = make([]*MatchingAcl, n) -- for i := 0; i < n; i++ { -- f.MatchingAcls[i] = new(MatchingAcl) -- if err := f.MatchingAcls[i].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --type MatchingAcl struct { -- Err KError -- ErrMsg *string -- Resource -- Acl --} -- --func (m *MatchingAcl) encode(pe packetEncoder) error { -- pe.putInt16(int16(m.Err)) -- if err := pe.putNullableString(m.ErrMsg); err != nil { -- return err -- } -- -- if err := m.Resource.encode(pe); err != nil { -- return err -- } -- -- if err := m.Acl.encode(pe); err != nil { -- return err -- } -- -- return nil --} -- --func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- m.Err = KError(kerr) -- -- if m.ErrMsg, err = pd.getNullableString(); err != nil { -- return err -- } -- -- if err := m.Resource.decode(pd, version); err != nil { -- return err -- } -- -- if err := m.Acl.decode(pd, version); err != nil { -- return err -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go -deleted file mode 100644 -index 02a5a1f0e223c..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_describe_request.go -+++ /dev/null -@@ -1,25 +0,0 @@ --package sarama -- --type DescribeAclsRequest struct { -- AclFilter --} -- --func (d *DescribeAclsRequest) encode(pe packetEncoder) error { -- return d.AclFilter.encode(pe) --} -- --func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) { -- return d.AclFilter.decode(pd, version) --} -- --func (d *DescribeAclsRequest) key() int16 { -- return 29 --} -- --func (d *DescribeAclsRequest) version() int16 { -- return 0 --} -- --func (d *DescribeAclsRequest) 
requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go -deleted file mode 100644 -index 5bc9497f4c550..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_describe_response.go -+++ /dev/null -@@ -1,80 +0,0 @@ --package sarama -- --import ""time"" -- --type DescribeAclsResponse struct { -- ThrottleTime time.Duration -- Err KError -- ErrMsg *string -- ResourceAcls []*ResourceAcls --} -- --func (d *DescribeAclsResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) -- pe.putInt16(int16(d.Err)) -- -- if err := pe.putNullableString(d.ErrMsg); err != nil { -- return err -- } -- -- if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil { -- return err -- } -- -- for _, resourceAcl := range d.ResourceAcls { -- if err := resourceAcl.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- d.Err = KError(kerr) -- -- errmsg, err := pd.getString() -- if err != nil { -- return err -- } -- if errmsg != """" { -- d.ErrMsg = &errmsg -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- d.ResourceAcls = make([]*ResourceAcls, n) -- -- for i := 0; i < n; i++ { -- d.ResourceAcls[i] = new(ResourceAcls) -- if err := d.ResourceAcls[i].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (d *DescribeAclsResponse) key() int16 { -- return 29 --} -- --func (d *DescribeAclsResponse) version() int16 { -- return 0 --} -- --func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go -deleted file mode 100644 -index 97063542198c9..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_filter.go -+++ /dev/null -@@ -1,61 +0,0 @@ --package sarama -- --type AclFilter struct { -- ResourceType AclResourceType -- ResourceName *string -- Principal *string -- Host *string -- Operation AclOperation -- PermissionType AclPermissionType --} -- --func (a *AclFilter) encode(pe packetEncoder) error { -- pe.putInt8(int8(a.ResourceType)) -- if err := pe.putNullableString(a.ResourceName); err != nil { -- return err -- } -- if err := pe.putNullableString(a.Principal); err != nil { -- return err -- } -- if err := pe.putNullableString(a.Host); err != nil { -- return err -- } -- pe.putInt8(int8(a.Operation)) -- pe.putInt8(int8(a.PermissionType)) -- -- return nil --} -- --func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { -- resourceType, err := pd.getInt8() -- if err != nil { -- return err -- } -- a.ResourceType = AclResourceType(resourceType) -- -- if a.ResourceName, err = pd.getNullableString(); err != nil { -- return err -- } -- -- if a.Principal, err = pd.getNullableString(); err != nil { -- return err -- } -- -- if a.Host, err = pd.getNullableString(); err != nil { -- return err -- } -- -- operation, err := pd.getInt8() -- if err != nil { -- return err -- } -- a.Operation = AclOperation(operation) -- -- permissionType, err := pd.getInt8() -- if err != nil { -- return err -- } -- a.PermissionType = 
AclPermissionType(permissionType) -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go -deleted file mode 100644 -index 19da6f2f451f8..0000000000000 ---- a/vendor/github.com/Shopify/sarama/acl_types.go -+++ /dev/null -@@ -1,42 +0,0 @@ --package sarama -- --type AclOperation int -- --// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java --const ( -- AclOperationUnknown AclOperation = 0 -- AclOperationAny AclOperation = 1 -- AclOperationAll AclOperation = 2 -- AclOperationRead AclOperation = 3 -- AclOperationWrite AclOperation = 4 -- AclOperationCreate AclOperation = 5 -- AclOperationDelete AclOperation = 6 -- AclOperationAlter AclOperation = 7 -- AclOperationDescribe AclOperation = 8 -- AclOperationClusterAction AclOperation = 9 -- AclOperationDescribeConfigs AclOperation = 10 -- AclOperationAlterConfigs AclOperation = 11 -- AclOperationIdempotentWrite AclOperation = 12 --) -- --type AclPermissionType int -- --// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java --const ( -- AclPermissionUnknown AclPermissionType = 0 -- AclPermissionAny AclPermissionType = 1 -- AclPermissionDeny AclPermissionType = 2 -- AclPermissionAllow AclPermissionType = 3 --) -- --type AclResourceType int -- --// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java --const ( -- AclResourceUnknown AclResourceType = 0 -- AclResourceAny AclResourceType = 1 -- AclResourceTopic AclResourceType = 2 -- AclResourceGroup AclResourceType = 3 -- AclResourceCluster AclResourceType = 4 -- AclResourceTransactionalID AclResourceType = 5 --) -diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go -deleted file mode 100644 -index 6da166c634b6b..0000000000000 ---- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go -+++ /dev/null -@@ -1,52 +0,0 @@ --package sarama -- --type AddOffsetsToTxnRequest struct { -- TransactionalID string -- ProducerID int64 -- ProducerEpoch int16 -- GroupID string --} -- --func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error { -- if err := pe.putString(a.TransactionalID); err != nil { -- return err -- } -- -- pe.putInt64(a.ProducerID) -- -- pe.putInt16(a.ProducerEpoch) -- -- if err := pe.putString(a.GroupID); err != nil { -- return err -- } -- -- return nil --} -- --func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { -- if a.TransactionalID, err = pd.getString(); err != nil { -- return err -- } -- if a.ProducerID, err = pd.getInt64(); err != nil { -- return err -- } -- if a.ProducerEpoch, err = pd.getInt16(); err != nil { -- return err -- } -- if a.GroupID, err = pd.getString(); err != nil { -- return err -- } -- return nil --} -- --func (a *AddOffsetsToTxnRequest) key() int16 { -- return 25 --} -- --func (a *AddOffsetsToTxnRequest) version() int16 { -- return 0 --} -- --func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go -deleted file mode 100644 -index 3a46151a050a4..0000000000000 ---- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go -+++ /dev/null -@@ -1,44 +0,0 @@ --package sarama -- --import ( 
-- ""time"" --) -- --type AddOffsetsToTxnResponse struct { -- ThrottleTime time.Duration -- Err KError --} -- --func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) -- pe.putInt16(int16(a.Err)) -- return nil --} -- --func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- a.Err = KError(kerr) -- -- return nil --} -- --func (a *AddOffsetsToTxnResponse) key() int16 { -- return 25 --} -- --func (a *AddOffsetsToTxnResponse) version() int16 { -- return 0 --} -- --func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go -deleted file mode 100644 -index a8a59225e4d20..0000000000000 ---- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go -+++ /dev/null -@@ -1,76 +0,0 @@ --package sarama -- --type AddPartitionsToTxnRequest struct { -- TransactionalID string -- ProducerID int64 -- ProducerEpoch int16 -- TopicPartitions map[string][]int32 --} -- --func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error { -- if err := pe.putString(a.TransactionalID); err != nil { -- return err -- } -- pe.putInt64(a.ProducerID) -- pe.putInt16(a.ProducerEpoch) -- -- if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil { -- return err -- } -- for topic, partitions := range a.TopicPartitions { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putInt32Array(partitions); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { -- if a.TransactionalID, err = pd.getString(); err != nil { -- return err -- } -- if a.ProducerID, err = pd.getInt64(); err != nil { -- return err -- } -- if a.ProducerEpoch, err = pd.getInt16(); err != nil { -- return err -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- a.TopicPartitions = make(map[string][]int32) -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- -- partitions, err := pd.getInt32Array() -- if err != nil { -- return err -- } -- -- a.TopicPartitions[topic] = partitions -- } -- -- return nil --} -- --func (a *AddPartitionsToTxnRequest) key() int16 { -- return 24 --} -- --func (a *AddPartitionsToTxnRequest) version() int16 { -- return 0 --} -- --func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go -deleted file mode 100644 -index 581c556c5ce6c..0000000000000 ---- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go -+++ /dev/null -@@ -1,108 +0,0 @@ --package sarama -- --import ( -- ""time"" --) -- --type AddPartitionsToTxnResponse struct { -- ThrottleTime time.Duration -- Errors map[string][]*PartitionError --} -- --func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) -- if err := pe.putArrayLength(len(a.Errors)); err != nil { -- return err -- } -- -- for topic, e := 
range a.Errors { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putArrayLength(len(e)); err != nil { -- return err -- } -- for _, partitionError := range e { -- if err := partitionError.encode(pe); err != nil { -- return err -- } -- } -- } -- -- return nil --} -- --func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- a.Errors = make(map[string][]*PartitionError) -- -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- -- m, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- a.Errors[topic] = make([]*PartitionError, m) -- -- for j := 0; j < m; j++ { -- a.Errors[topic][j] = new(PartitionError) -- if err := a.Errors[topic][j].decode(pd, version); err != nil { -- return err -- } -- } -- } -- -- return nil --} -- --func (a *AddPartitionsToTxnResponse) key() int16 { -- return 24 --} -- --func (a *AddPartitionsToTxnResponse) version() int16 { -- return 0 --} -- --func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -- --type PartitionError struct { -- Partition int32 -- Err KError --} -- --func (p *PartitionError) encode(pe packetEncoder) error { -- pe.putInt32(p.Partition) -- pe.putInt16(int16(p.Err)) -- return nil --} -- --func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) { -- if p.Partition, err = pd.getInt32(); err != nil { -- return err -- } -- -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- p.Err = KError(kerr) -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go -deleted file mode 100644 -index 48c44ead67a74..0000000000000 ---- a/vendor/github.com/Shopify/sarama/alter_configs_request.go -+++ /dev/null -@@ -1,120 +0,0 @@ --package sarama -- --type AlterConfigsRequest struct { -- Resources []*AlterConfigsResource -- ValidateOnly bool --} -- --type AlterConfigsResource struct { -- Type ConfigResourceType -- Name string -- ConfigEntries map[string]*string --} -- --func (acr *AlterConfigsRequest) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(acr.Resources)); err != nil { -- return err -- } -- -- for _, r := range acr.Resources { -- if err := r.encode(pe); err != nil { -- return err -- } -- } -- -- pe.putBool(acr.ValidateOnly) -- return nil --} -- --func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { -- resourceCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- acr.Resources = make([]*AlterConfigsResource, resourceCount) -- for i := range acr.Resources { -- r := &AlterConfigsResource{} -- err = r.decode(pd, version) -- if err != nil { -- return err -- } -- acr.Resources[i] = r -- } -- -- validateOnly, err := pd.getBool() -- if err != nil { -- return err -- } -- -- acr.ValidateOnly = validateOnly -- -- return nil --} -- --func (ac *AlterConfigsResource) encode(pe packetEncoder) error { -- pe.putInt8(int8(ac.Type)) -- -- if err := pe.putString(ac.Name); err != nil { -- return err -- } -- -- if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil { -- return err -- } -- for configKey, configValue := range ac.ConfigEntries { -- if err := pe.putString(configKey); err != nil 
{ -- return err -- } -- if err := pe.putNullableString(configValue); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { -- t, err := pd.getInt8() -- if err != nil { -- return err -- } -- ac.Type = ConfigResourceType(t) -- -- name, err := pd.getString() -- if err != nil { -- return err -- } -- ac.Name = name -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- if n > 0 { -- ac.ConfigEntries = make(map[string]*string, n) -- for i := 0; i < n; i++ { -- configKey, err := pd.getString() -- if err != nil { -- return err -- } -- if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { -- return err -- } -- } -- } -- return err --} -- --func (acr *AlterConfigsRequest) key() int16 { -- return 33 --} -- --func (acr *AlterConfigsRequest) version() int16 { -- return 0 --} -- --func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go -deleted file mode 100644 -index 29b09e1ff84bb..0000000000000 ---- a/vendor/github.com/Shopify/sarama/alter_configs_response.go -+++ /dev/null -@@ -1,95 +0,0 @@ --package sarama -- --import ""time"" -- --type AlterConfigsResponse struct { -- ThrottleTime time.Duration -- Resources []*AlterConfigsResourceResponse --} -- --type AlterConfigsResourceResponse struct { -- ErrorCode int16 -- ErrorMsg string -- Type ConfigResourceType -- Name string --} -- --func (ct *AlterConfigsResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(ct.ThrottleTime / time.Millisecond)) -- -- if err := pe.putArrayLength(len(ct.Resources)); err != nil { -- return err -- } -- -- for i := range ct.Resources { -- pe.putInt16(ct.Resources[i].ErrorCode) -- err := pe.putString(ct.Resources[i].ErrorMsg) -- if err != nil { -- return nil -- } -- pe.putInt8(int8(ct.Resources[i].Type)) -- err = pe.putString(ct.Resources[i].Name) -- if err != nil { -- return nil -- } -- } -- -- return nil --} -- --func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- responseCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- acr.Resources = make([]*AlterConfigsResourceResponse, responseCount) -- -- for i := range acr.Resources { -- acr.Resources[i] = new(AlterConfigsResourceResponse) -- -- errCode, err := pd.getInt16() -- if err != nil { -- return err -- } -- acr.Resources[i].ErrorCode = errCode -- -- e, err := pd.getString() -- if err != nil { -- return err -- } -- acr.Resources[i].ErrorMsg = e -- -- t, err := pd.getInt8() -- if err != nil { -- return err -- } -- acr.Resources[i].Type = ConfigResourceType(t) -- -- name, err := pd.getString() -- if err != nil { -- return err -- } -- acr.Resources[i].Name = name -- } -- -- return nil --} -- --func (r *AlterConfigsResponse) key() int16 { -- return 32 --} -- --func (r *AlterConfigsResponse) version() int16 { -- return 0 --} -- --func (r *AlterConfigsResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go -deleted file mode 100644 -index ab65f01ccff12..0000000000000 ---- a/vendor/github.com/Shopify/sarama/api_versions_request.go -+++ 
/dev/null -@@ -1,24 +0,0 @@ --package sarama -- --type ApiVersionsRequest struct { --} -- --func (r *ApiVersionsRequest) encode(pe packetEncoder) error { -- return nil --} -- --func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { -- return nil --} -- --func (r *ApiVersionsRequest) key() int16 { -- return 18 --} -- --func (r *ApiVersionsRequest) version() int16 { -- return 0 --} -- --func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { -- return V0_10_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go -deleted file mode 100644 -index 23bc326e15f29..0000000000000 ---- a/vendor/github.com/Shopify/sarama/api_versions_response.go -+++ /dev/null -@@ -1,87 +0,0 @@ --package sarama -- --type ApiVersionsResponseBlock struct { -- ApiKey int16 -- MinVersion int16 -- MaxVersion int16 --} -- --func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { -- pe.putInt16(b.ApiKey) -- pe.putInt16(b.MinVersion) -- pe.putInt16(b.MaxVersion) -- return nil --} -- --func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { -- var err error -- -- if b.ApiKey, err = pd.getInt16(); err != nil { -- return err -- } -- -- if b.MinVersion, err = pd.getInt16(); err != nil { -- return err -- } -- -- if b.MaxVersion, err = pd.getInt16(); err != nil { -- return err -- } -- -- return nil --} -- --type ApiVersionsResponse struct { -- Err KError -- ApiVersions []*ApiVersionsResponseBlock --} -- --func (r *ApiVersionsResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { -- return err -- } -- for _, apiVersion := range r.ApiVersions { -- if err := apiVersion.encode(pe); err != nil { -- return err -- } -- } -- return nil --} -- --func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- -- r.Err = KError(kerr) -- -- numBlocks, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) -- for i := 0; i < numBlocks; i++ { -- block := new(ApiVersionsResponseBlock) -- if err := block.decode(pd); err != nil { -- return err -- } -- r.ApiVersions[i] = block -- } -- -- return nil --} -- --func (r *ApiVersionsResponse) key() int16 { -- return 18 --} -- --func (r *ApiVersionsResponse) version() int16 { -- return 0 --} -- --func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { -- return V0_10_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go -deleted file mode 100644 -index 1eff81cbf6216..0000000000000 ---- a/vendor/github.com/Shopify/sarama/async_producer.go -+++ /dev/null -@@ -1,921 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""sync"" -- ""time"" -- -- ""github.com/eapache/go-resiliency/breaker"" -- ""github.com/eapache/queue"" --) -- --// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages --// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, --// and parses responses for errors. You must read from the Errors() channel or the --// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid --// leaks: it will not be garbage-collected automatically when it passes out of --// scope. 
--type AsyncProducer interface { -- -- // AsyncClose triggers a shutdown of the producer. The shutdown has completed -- // when both the Errors and Successes channels have been closed. When calling -- // AsyncClose, you *must* continue to read from those channels in order to -- // drain the results of any messages in flight. -- AsyncClose() -- -- // Close shuts down the producer and waits for any buffered messages to be -- // flushed. You must call this function before a producer object passes out of -- // scope, as it may otherwise leak memory. You must call this before calling -- // Close on the underlying client. -- Close() error -- -- // Input is the input channel for the user to write messages to that they -- // wish to send. -- Input() chan<- *ProducerMessage -- -- // Successes is the success output channel back to the user when Return.Successes is -- // enabled. If Return.Successes is true, you MUST read from this channel or the -- // Producer will deadlock. It is suggested that you send and read messages -- // together in a single select statement. -- Successes() <-chan *ProducerMessage -- -- // Errors is the error output channel back to the user. You MUST read from this -- // channel or the Producer will deadlock when the channel is full. Alternatively, -- // you can set Producer.Return.Errors in your config to false, which prevents -- // errors to be returned. -- Errors() <-chan *ProducerError --} -- --type asyncProducer struct { -- client Client -- conf *Config -- ownClient bool -- -- errors chan *ProducerError -- input, successes, retries chan *ProducerMessage -- inFlight sync.WaitGroup -- -- brokers map[*Broker]chan<- *ProducerMessage -- brokerRefs map[chan<- *ProducerMessage]int -- brokerLock sync.Mutex --} -- --// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. --func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { -- client, err := NewClient(addrs, conf) -- if err != nil { -- return nil, err -- } -- -- p, err := NewAsyncProducerFromClient(client) -- if err != nil { -- return nil, err -- } -- p.(*asyncProducer).ownClient = true -- return p, nil --} -- --// NewAsyncProducerFromClient creates a new Producer using the given client. It is still --// necessary to call Close() on the underlying client when shutting down this producer. --func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { -- // Check that we are not dealing with a closed Client before processing any other arguments -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- p := &asyncProducer{ -- client: client, -- conf: client.Config(), -- errors: make(chan *ProducerError), -- input: make(chan *ProducerMessage), -- successes: make(chan *ProducerMessage), -- retries: make(chan *ProducerMessage), -- brokers: make(map[*Broker]chan<- *ProducerMessage), -- brokerRefs: make(map[chan<- *ProducerMessage]int), -- } -- -- // launch our singleton dispatchers -- go withRecover(p.dispatcher) -- go withRecover(p.retryHandler) -- -- return p, nil --} -- --type flagSet int8 -- --const ( -- syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer -- fin // final message from partitionProducer to brokerProducer and back -- shutdown // start the shutdown process --) -- --// ProducerMessage is the collection of elements passed to the Producer in order to send a message. --type ProducerMessage struct { -- Topic string // The Kafka topic for this message. -- // The partitioning key for this message. 
Pre-existing Encoders include -- // StringEncoder and ByteEncoder. -- Key Encoder -- // The actual message to store in Kafka. Pre-existing Encoders include -- // StringEncoder and ByteEncoder. -- Value Encoder -- -- // The headers are key-value pairs that are transparently passed -- // by Kafka between producers and consumers. -- Headers []RecordHeader -- -- // This field is used to hold arbitrary data you wish to include so it -- // will be available when receiving on the Successes and Errors channels. -- // Sarama completely ignores this field and is only to be used for -- // pass-through data. -- Metadata interface{} -- -- // Below this point are filled in by the producer as the message is processed -- -- // Offset is the offset of the message stored on the broker. This is only -- // guaranteed to be defined if the message was successfully delivered and -- // RequiredAcks is not NoResponse. -- Offset int64 -- // Partition is the partition that the message was sent to. This is only -- // guaranteed to be defined if the message was successfully delivered. -- Partition int32 -- // Timestamp is the timestamp assigned to the message by the broker. This -- // is only guaranteed to be defined if the message was successfully -- // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at -- // least version 0.10.0. -- Timestamp time.Time -- -- retries int -- flags flagSet --} -- --const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. -- --func (m *ProducerMessage) byteSize(version int) int { -- var size int -- if version >= 2 { -- size = maximumRecordOverhead -- for _, h := range m.Headers { -- size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 -- } -- } else { -- size = producerMessageOverhead -- } -- if m.Key != nil { -- size += m.Key.Length() -- } -- if m.Value != nil { -- size += m.Value.Length() -- } -- return size --} -- --func (m *ProducerMessage) clear() { -- m.flags = 0 -- m.retries = 0 --} -- --// ProducerError is the type of error generated when the producer fails to deliver a message. --// It contains the original ProducerMessage as well as the actual error value. --type ProducerError struct { -- Msg *ProducerMessage -- Err error --} -- --func (pe ProducerError) Error() string { -- return fmt.Sprintf(""kafka: Failed to produce message to topic %s: %s"", pe.Msg.Topic, pe.Err) --} -- --// ProducerErrors is a type that wraps a batch of ""ProducerError""s and implements the Error interface. --// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel --// when closing a producer. 
--type ProducerErrors []*ProducerError -- --func (pe ProducerErrors) Error() string { -- return fmt.Sprintf(""kafka: Failed to deliver %d messages."", len(pe)) --} -- --func (p *asyncProducer) Errors() <-chan *ProducerError { -- return p.errors --} -- --func (p *asyncProducer) Successes() <-chan *ProducerMessage { -- return p.successes --} -- --func (p *asyncProducer) Input() chan<- *ProducerMessage { -- return p.input --} -- --func (p *asyncProducer) Close() error { -- p.AsyncClose() -- -- if p.conf.Producer.Return.Successes { -- go withRecover(func() { -- for range p.successes { -- } -- }) -- } -- -- var errors ProducerErrors -- if p.conf.Producer.Return.Errors { -- for event := range p.errors { -- errors = append(errors, event) -- } -- } else { -- <-p.errors -- } -- -- if len(errors) > 0 { -- return errors -- } -- return nil --} -- --func (p *asyncProducer) AsyncClose() { -- go withRecover(p.shutdown) --} -- --// singleton --// dispatches messages by topic --func (p *asyncProducer) dispatcher() { -- handlers := make(map[string]chan<- *ProducerMessage) -- shuttingDown := false -- -- for msg := range p.input { -- if msg == nil { -- Logger.Println(""Something tried to send a nil message, it was ignored."") -- continue -- } -- -- if msg.flags&shutdown != 0 { -- shuttingDown = true -- p.inFlight.Done() -- continue -- } else if msg.retries == 0 { -- if shuttingDown { -- // we can't just call returnError here because that decrements the wait group, -- // which hasn't been incremented yet for this message, and shouldn't be -- pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} -- if p.conf.Producer.Return.Errors { -- p.errors <- pErr -- } else { -- Logger.Println(pErr) -- } -- continue -- } -- p.inFlight.Add(1) -- } -- -- version := 1 -- if p.conf.Version.IsAtLeast(V0_11_0_0) { -- version = 2 -- } -- if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { -- p.returnError(msg, ErrMessageSizeTooLarge) -- continue -- } -- -- handler := handlers[msg.Topic] -- if handler == nil { -- handler = p.newTopicProducer(msg.Topic) -- handlers[msg.Topic] = handler -- } -- -- handler <- msg -- } -- -- for _, handler := range handlers { -- close(handler) -- } --} -- --// one per topic --// partitions messages, then dispatches them by partition --type topicProducer struct { -- parent *asyncProducer -- topic string -- input <-chan *ProducerMessage -- -- breaker *breaker.Breaker -- handlers map[int32]chan<- *ProducerMessage -- partitioner Partitioner --} -- --func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { -- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) -- tp := &topicProducer{ -- parent: p, -- topic: topic, -- input: input, -- breaker: breaker.New(3, 1, 10*time.Second), -- handlers: make(map[int32]chan<- *ProducerMessage), -- partitioner: p.conf.Producer.Partitioner(topic), -- } -- go withRecover(tp.dispatch) -- return input --} -- --func (tp *topicProducer) dispatch() { -- for msg := range tp.input { -- if msg.retries == 0 { -- if err := tp.partitionMessage(msg); err != nil { -- tp.parent.returnError(msg, err) -- continue -- } -- } -- -- handler := tp.handlers[msg.Partition] -- if handler == nil { -- handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) -- tp.handlers[msg.Partition] = handler -- } -- -- handler <- msg -- } -- -- for _, handler := range tp.handlers { -- close(handler) -- } --} -- --func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { -- var partitions []int32 -- -- err := tp.breaker.Run(func() (err error) 
{ -- if tp.partitioner.RequiresConsistency() { -- partitions, err = tp.parent.client.Partitions(msg.Topic) -- } else { -- partitions, err = tp.parent.client.WritablePartitions(msg.Topic) -- } -- return -- }) -- -- if err != nil { -- return err -- } -- -- numPartitions := int32(len(partitions)) -- -- if numPartitions == 0 { -- return ErrLeaderNotAvailable -- } -- -- choice, err := tp.partitioner.Partition(msg, numPartitions) -- -- if err != nil { -- return err -- } else if choice < 0 || choice >= numPartitions { -- return ErrInvalidPartition -- } -- -- msg.Partition = partitions[choice] -- -- return nil --} -- --// one per partition per topic --// dispatches messages to the appropriate broker --// also responsible for maintaining message order during retries --type partitionProducer struct { -- parent *asyncProducer -- topic string -- partition int32 -- input <-chan *ProducerMessage -- -- leader *Broker -- breaker *breaker.Breaker -- output chan<- *ProducerMessage -- -- // highWatermark tracks the ""current"" retry level, which is the only one where we actually let messages through, -- // all other messages get buffered in retryState[msg.retries].buf to preserve ordering -- // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and -- // therefore whether our buffer is complete and safe to flush) -- highWatermark int -- retryState []partitionRetryState --} -- --type partitionRetryState struct { -- buf []*ProducerMessage -- expectChaser bool --} -- --func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { -- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) -- pp := &partitionProducer{ -- parent: p, -- topic: topic, -- partition: partition, -- input: input, -- -- breaker: breaker.New(3, 1, 10*time.Second), -- retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), -- } -- go withRecover(pp.dispatch) -- return input --} -- --func (pp *partitionProducer) dispatch() { -- // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` -- // on the first message -- pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) -- if pp.leader != nil { -- pp.output = pp.parent.getBrokerProducer(pp.leader) -- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight -- pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} -- } -- -- for msg := range pp.input { -- if msg.retries > pp.highWatermark { -- // a new, higher, retry level; handle it and then back off -- pp.newHighWatermark(msg.retries) -- time.Sleep(pp.parent.conf.Producer.Retry.Backoff) -- } else if pp.highWatermark > 0 { -- // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level -- if msg.retries < pp.highWatermark { -- // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) -- if msg.flags&fin == fin { -- pp.retryState[msg.retries].expectChaser = false -- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected -- } else { -- pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) -- } -- continue -- } else if msg.flags&fin == fin { -- // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, -- // meaning this retry level is done and we can go down (at least) one level and flush that -- 
pp.retryState[pp.highWatermark].expectChaser = false -- pp.flushRetryBuffers() -- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected -- continue -- } -- } -- -- // if we made it this far then the current msg contains real data, and can be sent to the next goroutine -- // without breaking any of our ordering guarantees -- -- if pp.output == nil { -- if err := pp.updateLeader(); err != nil { -- pp.parent.returnError(msg, err) -- time.Sleep(pp.parent.conf.Producer.Retry.Backoff) -- continue -- } -- Logger.Printf(""producer/leader/%s/%d selected broker %d\n"", pp.topic, pp.partition, pp.leader.ID()) -- } -- -- pp.output <- msg -- } -- -- if pp.output != nil { -- pp.parent.unrefBrokerProducer(pp.leader, pp.output) -- } --} -- --func (pp *partitionProducer) newHighWatermark(hwm int) { -- Logger.Printf(""producer/leader/%s/%d state change to [retrying-%d]\n"", pp.topic, pp.partition, hwm) -- pp.highWatermark = hwm -- -- // send off a fin so that we know when everything ""in between"" has made it -- // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) -- pp.retryState[pp.highWatermark].expectChaser = true -- pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight -- pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} -- -- // a new HWM means that our current broker selection is out of date -- Logger.Printf(""producer/leader/%s/%d abandoning broker %d\n"", pp.topic, pp.partition, pp.leader.ID()) -- pp.parent.unrefBrokerProducer(pp.leader, pp.output) -- pp.output = nil --} -- --func (pp *partitionProducer) flushRetryBuffers() { -- Logger.Printf(""producer/leader/%s/%d state change to [flushing-%d]\n"", pp.topic, pp.partition, pp.highWatermark) -- for { -- pp.highWatermark-- -- -- if pp.output == nil { -- if err := pp.updateLeader(); err != nil { -- pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) -- goto flushDone -- } -- Logger.Printf(""producer/leader/%s/%d selected broker %d\n"", pp.topic, pp.partition, pp.leader.ID()) -- } -- -- for _, msg := range pp.retryState[pp.highWatermark].buf { -- pp.output <- msg -- } -- -- flushDone: -- pp.retryState[pp.highWatermark].buf = nil -- if pp.retryState[pp.highWatermark].expectChaser { -- Logger.Printf(""producer/leader/%s/%d state change to [retrying-%d]\n"", pp.topic, pp.partition, pp.highWatermark) -- break -- } else if pp.highWatermark == 0 { -- Logger.Printf(""producer/leader/%s/%d state change to [normal]\n"", pp.topic, pp.partition) -- break -- } -- } --} -- --func (pp *partitionProducer) updateLeader() error { -- return pp.breaker.Run(func() (err error) { -- if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { -- return err -- } -- -- if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { -- return err -- } -- -- pp.output = pp.parent.getBrokerProducer(pp.leader) -- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight -- pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} -- -- return nil -- }) --} -- --// one per broker; also constructs an associated flusher --func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { -- var ( -- input = make(chan *ProducerMessage) -- bridge = make(chan *produceSet) -- responses = make(chan *brokerProducerResponse) -- ) -- -- bp := &brokerProducer{ 
-- parent: p, -- broker: broker, -- input: input, -- output: bridge, -- responses: responses, -- buffer: newProduceSet(p), -- currentRetries: make(map[string]map[int32]error), -- } -- go withRecover(bp.run) -- -- // minimal bridge to make the network response `select`able -- go withRecover(func() { -- for set := range bridge { -- request := set.buildRequest() -- -- response, err := broker.Produce(request) -- -- responses <- &brokerProducerResponse{ -- set: set, -- err: err, -- res: response, -- } -- } -- close(responses) -- }) -- -- return input --} -- --type brokerProducerResponse struct { -- set *produceSet -- err error -- res *ProduceResponse --} -- --// groups messages together into appropriately-sized batches for sending to the broker --// handles state related to retries etc --type brokerProducer struct { -- parent *asyncProducer -- broker *Broker -- -- input <-chan *ProducerMessage -- output chan<- *produceSet -- responses <-chan *brokerProducerResponse -- -- buffer *produceSet -- timer <-chan time.Time -- timerFired bool -- -- closing error -- currentRetries map[string]map[int32]error --} -- --func (bp *brokerProducer) run() { -- var output chan<- *produceSet -- Logger.Printf(""producer/broker/%d starting up\n"", bp.broker.ID()) -- -- for { -- select { -- case msg := <-bp.input: -- if msg == nil { -- bp.shutdown() -- return -- } -- -- if msg.flags&syn == syn { -- Logger.Printf(""producer/broker/%d state change to [open] on %s/%d\n"", -- bp.broker.ID(), msg.Topic, msg.Partition) -- if bp.currentRetries[msg.Topic] == nil { -- bp.currentRetries[msg.Topic] = make(map[int32]error) -- } -- bp.currentRetries[msg.Topic][msg.Partition] = nil -- bp.parent.inFlight.Done() -- continue -- } -- -- if reason := bp.needsRetry(msg); reason != nil { -- bp.parent.retryMessage(msg, reason) -- -- if bp.closing == nil && msg.flags&fin == fin { -- // we were retrying this partition but we can start processing again -- delete(bp.currentRetries[msg.Topic], msg.Partition) -- Logger.Printf(""producer/broker/%d state change to [closed] on %s/%d\n"", -- bp.broker.ID(), msg.Topic, msg.Partition) -- } -- -- continue -- } -- -- if bp.buffer.wouldOverflow(msg) { -- if err := bp.waitForSpace(msg); err != nil { -- bp.parent.retryMessage(msg, err) -- continue -- } -- } -- -- if err := bp.buffer.add(msg); err != nil { -- bp.parent.returnError(msg, err) -- continue -- } -- -- if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { -- bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) -- } -- case <-bp.timer: -- bp.timerFired = true -- case output <- bp.buffer: -- bp.rollOver() -- case response := <-bp.responses: -- bp.handleResponse(response) -- } -- -- if bp.timerFired || bp.buffer.readyToFlush() { -- output = bp.output -- } else { -- output = nil -- } -- } --} -- --func (bp *brokerProducer) shutdown() { -- for !bp.buffer.empty() { -- select { -- case response := <-bp.responses: -- bp.handleResponse(response) -- case bp.output <- bp.buffer: -- bp.rollOver() -- } -- } -- close(bp.output) -- for response := range bp.responses { -- bp.handleResponse(response) -- } -- -- Logger.Printf(""producer/broker/%d shut down\n"", bp.broker.ID()) --} -- --func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { -- if bp.closing != nil { -- return bp.closing -- } -- -- return bp.currentRetries[msg.Topic][msg.Partition] --} -- --func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { -- Logger.Printf(""producer/broker/%d maximum request accumulated, waiting for space\n"", bp.broker.ID()) 
-- -- for { -- select { -- case response := <-bp.responses: -- bp.handleResponse(response) -- // handling a response can change our state, so re-check some things -- if reason := bp.needsRetry(msg); reason != nil { -- return reason -- } else if !bp.buffer.wouldOverflow(msg) { -- return nil -- } -- case bp.output <- bp.buffer: -- bp.rollOver() -- return nil -- } -- } --} -- --func (bp *brokerProducer) rollOver() { -- bp.timer = nil -- bp.timerFired = false -- bp.buffer = newProduceSet(bp.parent) --} -- --func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { -- if response.err != nil { -- bp.handleError(response.set, response.err) -- } else { -- bp.handleSuccess(response.set, response.res) -- } -- -- if bp.buffer.empty() { -- bp.rollOver() // this can happen if the response invalidated our buffer -- } --} -- --func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { -- // we iterate through the blocks in the request set, not the response, so that we notice -- // if the response is missing a block completely -- sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { -- if response == nil { -- // this only happens when RequiredAcks is NoResponse, so we have to assume success -- bp.parent.returnSuccesses(msgs) -- return -- } -- -- block := response.GetBlock(topic, partition) -- if block == nil { -- bp.parent.returnErrors(msgs, ErrIncompleteResponse) -- return -- } -- -- switch block.Err { -- // Success -- case ErrNoError: -- if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { -- for _, msg := range msgs { -- msg.Timestamp = block.Timestamp -- } -- } -- for i, msg := range msgs { -- msg.Offset = block.Offset + int64(i) -- } -- bp.parent.returnSuccesses(msgs) -- // Retriable errors -- case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, -- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: -- Logger.Printf(""producer/broker/%d state change to [retrying] on %s/%d because %v\n"", -- bp.broker.ID(), topic, partition, block.Err) -- bp.currentRetries[topic][partition] = block.Err -- bp.parent.retryMessages(msgs, block.Err) -- bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) -- // Other non-retriable errors -- default: -- bp.parent.returnErrors(msgs, block.Err) -- } -- }) --} -- --func (bp *brokerProducer) handleError(sent *produceSet, err error) { -- switch err.(type) { -- case PacketEncodingError: -- sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { -- bp.parent.returnErrors(msgs, err) -- }) -- default: -- Logger.Printf(""producer/broker/%d state change to [closing] because %s\n"", bp.broker.ID(), err) -- bp.parent.abandonBrokerConnection(bp.broker) -- _ = bp.broker.Close() -- bp.closing = err -- sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { -- bp.parent.retryMessages(msgs, err) -- }) -- bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { -- bp.parent.retryMessages(msgs, err) -- }) -- bp.rollOver() -- } --} -- --// singleton --// effectively a ""bridge"" between the flushers and the dispatcher in order to avoid deadlock --// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel --func (p *asyncProducer) retryHandler() { -- var msg *ProducerMessage -- buf := queue.New() -- -- for { -- if buf.Length() == 0 { -- msg = <-p.retries -- } else { -- select { -- case msg = <-p.retries: -- 
case p.input <- buf.Peek().(*ProducerMessage): -- buf.Remove() -- continue -- } -- } -- -- if msg == nil { -- return -- } -- -- buf.Add(msg) -- } --} -- --// utility functions -- --func (p *asyncProducer) shutdown() { -- Logger.Println(""Producer shutting down."") -- p.inFlight.Add(1) -- p.input <- &ProducerMessage{flags: shutdown} -- -- p.inFlight.Wait() -- -- if p.ownClient { -- err := p.client.Close() -- if err != nil { -- Logger.Println(""producer/shutdown failed to close the embedded client:"", err) -- } -- } -- -- close(p.input) -- close(p.retries) -- close(p.errors) -- close(p.successes) --} -- --func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { -- msg.clear() -- pErr := &ProducerError{Msg: msg, Err: err} -- if p.conf.Producer.Return.Errors { -- p.errors <- pErr -- } else { -- Logger.Println(pErr) -- } -- p.inFlight.Done() --} -- --func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { -- for _, msg := range batch { -- p.returnError(msg, err) -- } --} -- --func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { -- for _, msg := range batch { -- if p.conf.Producer.Return.Successes { -- msg.clear() -- p.successes <- msg -- } -- p.inFlight.Done() -- } --} -- --func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { -- if msg.retries >= p.conf.Producer.Retry.Max { -- p.returnError(msg, err) -- } else { -- msg.retries++ -- p.retries <- msg -- } --} -- --func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { -- for _, msg := range batch { -- p.retryMessage(msg, err) -- } --} -- --func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { -- p.brokerLock.Lock() -- defer p.brokerLock.Unlock() -- -- bp := p.brokers[broker] -- -- if bp == nil { -- bp = p.newBrokerProducer(broker) -- p.brokers[broker] = bp -- p.brokerRefs[bp] = 0 -- } -- -- p.brokerRefs[bp]++ -- -- return bp --} -- --func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { -- p.brokerLock.Lock() -- defer p.brokerLock.Unlock() -- -- p.brokerRefs[bp]-- -- if p.brokerRefs[bp] == 0 { -- close(bp) -- delete(p.brokerRefs, bp) -- -- if p.brokers[broker] == bp { -- delete(p.brokers, broker) -- } -- } --} -- --func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { -- p.brokerLock.Lock() -- defer p.brokerLock.Unlock() -- -- delete(p.brokers, broker) --} -diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go -deleted file mode 100644 -index b759f8f7841c6..0000000000000 ---- a/vendor/github.com/Shopify/sarama/broker.go -+++ /dev/null -@@ -1,823 +0,0 @@ --package sarama -- --import ( -- ""crypto/tls"" -- ""encoding/binary"" -- ""fmt"" -- ""io"" -- ""net"" -- ""strconv"" -- ""sync"" -- ""sync/atomic"" -- ""time"" -- -- ""github.com/rcrowley/go-metrics"" --) -- --// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. 
--type Broker struct { -- id int32 -- addr string -- -- conf *Config -- correlationID int32 -- conn net.Conn -- connErr error -- lock sync.Mutex -- opened int32 -- -- responses chan responsePromise -- done chan bool -- -- incomingByteRate metrics.Meter -- requestRate metrics.Meter -- requestSize metrics.Histogram -- requestLatency metrics.Histogram -- outgoingByteRate metrics.Meter -- responseRate metrics.Meter -- responseSize metrics.Histogram -- brokerIncomingByteRate metrics.Meter -- brokerRequestRate metrics.Meter -- brokerRequestSize metrics.Histogram -- brokerRequestLatency metrics.Histogram -- brokerOutgoingByteRate metrics.Meter -- brokerResponseRate metrics.Meter -- brokerResponseSize metrics.Histogram --} -- --type responsePromise struct { -- requestTime time.Time -- correlationID int32 -- packets chan []byte -- errors chan error --} -- --// NewBroker creates and returns a Broker targeting the given host:port address. --// This does not attempt to actually connect, you have to call Open() for that. --func NewBroker(addr string) *Broker { -- return &Broker{id: -1, addr: addr} --} -- --// Open tries to connect to the Broker if it is not already connected or connecting, but does not block --// waiting for the connection to complete. This means that any subsequent operations on the broker will --// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, --// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or --// AlreadyConnected. If conf is nil, the result of NewConfig() is used. --func (b *Broker) Open(conf *Config) error { -- if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { -- return ErrAlreadyConnected -- } -- -- if conf == nil { -- conf = NewConfig() -- } -- -- err := conf.Validate() -- if err != nil { -- return err -- } -- -- b.lock.Lock() -- -- go withRecover(func() { -- defer b.lock.Unlock() -- -- dialer := net.Dialer{ -- Timeout: conf.Net.DialTimeout, -- KeepAlive: conf.Net.KeepAlive, -- } -- -- if conf.Net.TLS.Enable { -- b.conn, b.connErr = tls.DialWithDialer(&dialer, ""tcp"", b.addr, conf.Net.TLS.Config) -- } else { -- b.conn, b.connErr = dialer.Dial(""tcp"", b.addr) -- } -- if b.connErr != nil { -- Logger.Printf(""Failed to connect to broker %s: %s\n"", b.addr, b.connErr) -- b.conn = nil -- atomic.StoreInt32(&b.opened, 0) -- return -- } -- b.conn = newBufConn(b.conn) -- -- b.conf = conf -- -- // Create or reuse the global metrics shared between brokers -- b.incomingByteRate = metrics.GetOrRegisterMeter(""incoming-byte-rate"", conf.MetricRegistry) -- b.requestRate = metrics.GetOrRegisterMeter(""request-rate"", conf.MetricRegistry) -- b.requestSize = getOrRegisterHistogram(""request-size"", conf.MetricRegistry) -- b.requestLatency = getOrRegisterHistogram(""request-latency-in-ms"", conf.MetricRegistry) -- b.outgoingByteRate = metrics.GetOrRegisterMeter(""outgoing-byte-rate"", conf.MetricRegistry) -- b.responseRate = metrics.GetOrRegisterMeter(""response-rate"", conf.MetricRegistry) -- b.responseSize = getOrRegisterHistogram(""response-size"", conf.MetricRegistry) -- // Do not gather metrics for seeded broker (only used during bootstrap) because they share -- // the same id (-1) and are already exposed through the global metrics above -- if b.id >= 0 { -- b.brokerIncomingByteRate = getOrRegisterBrokerMeter(""incoming-byte-rate"", b, conf.MetricRegistry) -- b.brokerRequestRate = getOrRegisterBrokerMeter(""request-rate"", b, conf.MetricRegistry) -- b.brokerRequestSize = 
getOrRegisterBrokerHistogram(""request-size"", b, conf.MetricRegistry) -- b.brokerRequestLatency = getOrRegisterBrokerHistogram(""request-latency-in-ms"", b, conf.MetricRegistry) -- b.brokerOutgoingByteRate = getOrRegisterBrokerMeter(""outgoing-byte-rate"", b, conf.MetricRegistry) -- b.brokerResponseRate = getOrRegisterBrokerMeter(""response-rate"", b, conf.MetricRegistry) -- b.brokerResponseSize = getOrRegisterBrokerHistogram(""response-size"", b, conf.MetricRegistry) -- } -- -- if conf.Net.SASL.Enable { -- b.connErr = b.sendAndReceiveSASLPlainAuth() -- if b.connErr != nil { -- err = b.conn.Close() -- if err == nil { -- Logger.Printf(""Closed connection to broker %s\n"", b.addr) -- } else { -- Logger.Printf(""Error while closing connection to broker %s: %s\n"", b.addr, err) -- } -- b.conn = nil -- atomic.StoreInt32(&b.opened, 0) -- return -- } -- } -- -- b.done = make(chan bool) -- b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) -- -- if b.id >= 0 { -- Logger.Printf(""Connected to broker at %s (registered as #%d)\n"", b.addr, b.id) -- } else { -- Logger.Printf(""Connected to broker at %s (unregistered)\n"", b.addr) -- } -- go withRecover(b.responseReceiver) -- }) -- -- return nil --} -- --// Connected returns true if the broker is connected and false otherwise. If the broker is not --// connected but it had tried to connect, the error from that connection attempt is also returned. --func (b *Broker) Connected() (bool, error) { -- b.lock.Lock() -- defer b.lock.Unlock() -- -- return b.conn != nil, b.connErr --} -- --func (b *Broker) Close() error { -- b.lock.Lock() -- defer b.lock.Unlock() -- -- if b.conn == nil { -- return ErrNotConnected -- } -- -- close(b.responses) -- <-b.done -- -- err := b.conn.Close() -- -- b.conn = nil -- b.connErr = nil -- b.done = nil -- b.responses = nil -- -- if b.id >= 0 { -- b.conf.MetricRegistry.Unregister(getMetricNameForBroker(""incoming-byte-rate"", b)) -- b.conf.MetricRegistry.Unregister(getMetricNameForBroker(""request-rate"", b)) -- b.conf.MetricRegistry.Unregister(getMetricNameForBroker(""outgoing-byte-rate"", b)) -- b.conf.MetricRegistry.Unregister(getMetricNameForBroker(""response-rate"", b)) -- } -- -- if err == nil { -- Logger.Printf(""Closed connection to broker %s\n"", b.addr) -- } else { -- Logger.Printf(""Error while closing connection to broker %s: %s\n"", b.addr, err) -- } -- -- atomic.StoreInt32(&b.opened, 0) -- -- return err --} -- --// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. --func (b *Broker) ID() int32 { -- return b.id --} -- --// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. 
--func (b *Broker) Addr() string { -- return b.addr --} -- --func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { -- response := new(MetadataResponse) -- -- err := b.sendAndReceive(request, response) -- -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { -- response := new(ConsumerMetadataResponse) -- -- err := b.sendAndReceive(request, response) -- -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { -- response := new(OffsetResponse) -- -- err := b.sendAndReceive(request, response) -- -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { -- var response *ProduceResponse -- var err error -- -- if request.RequiredAcks == NoResponse { -- err = b.sendAndReceive(request, nil) -- } else { -- response = new(ProduceResponse) -- err = b.sendAndReceive(request, response) -- } -- -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { -- response := new(FetchResponse) -- -- err := b.sendAndReceive(request, response) -- -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { -- response := new(OffsetCommitResponse) -- -- err := b.sendAndReceive(request, response) -- -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { -- response := new(OffsetFetchResponse) -- -- err := b.sendAndReceive(request, response) -- -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { -- response := new(JoinGroupResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { -- response := new(SyncGroupResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { -- response := new(LeaveGroupResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { -- response := new(HeartbeatResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { -- response := new(ListGroupsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { -- response := new(DescribeGroupsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) 
ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { -- response := new(ApiVersionsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { -- response := new(CreateTopicsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { -- response := new(DeleteTopicsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) { -- response := new(DescribeAclsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) { -- response := new(CreateAclsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) { -- response := new(DeleteAclsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { -- response := new(InitProducerIDResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) { -- response := new(AddPartitionsToTxnResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) { -- response := new(AddOffsetsToTxnResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { -- response := new(EndTxnResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) { -- response := new(TxnOffsetCommitResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { -- response := new(DescribeConfigsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} -- --func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) { -- response := new(AlterConfigsResponse) -- -- err := b.sendAndReceive(request, response) -- if err != nil { -- return nil, err -- } -- -- return response, nil --} --func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { -- b.lock.Lock() -- defer b.lock.Unlock() -- -- if 
b.conn == nil { -- if b.connErr != nil { -- return nil, b.connErr -- } -- return nil, ErrNotConnected -- } -- -- if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { -- return nil, ErrUnsupportedVersion -- } -- -- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} -- buf, err := encode(req, b.conf.MetricRegistry) -- if err != nil { -- return nil, err -- } -- -- err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) -- if err != nil { -- return nil, err -- } -- -- requestTime := time.Now() -- bytes, err := b.conn.Write(buf) -- b.updateOutgoingCommunicationMetrics(bytes) -- if err != nil { -- return nil, err -- } -- b.correlationID++ -- -- if !promiseResponse { -- // Record request latency without the response -- b.updateRequestLatencyMetrics(time.Since(requestTime)) -- return nil, nil -- } -- -- promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} -- b.responses <- promise -- -- return &promise, nil --} -- --func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { -- promise, err := b.send(req, res != nil) -- -- if err != nil { -- return err -- } -- -- if promise == nil { -- return nil -- } -- -- select { -- case buf := <-promise.packets: -- return versionedDecode(buf, res, req.version()) -- case err = <-promise.errors: -- return err -- } --} -- --func (b *Broker) decode(pd packetDecoder) (err error) { -- b.id, err = pd.getInt32() -- if err != nil { -- return err -- } -- -- host, err := pd.getString() -- if err != nil { -- return err -- } -- -- port, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- b.addr = net.JoinHostPort(host, fmt.Sprint(port)) -- if _, _, err := net.SplitHostPort(b.addr); err != nil { -- return err -- } -- -- return nil --} -- --func (b *Broker) encode(pe packetEncoder) (err error) { -- -- host, portstr, err := net.SplitHostPort(b.addr) -- if err != nil { -- return err -- } -- port, err := strconv.Atoi(portstr) -- if err != nil { -- return err -- } -- -- pe.putInt32(b.id) -- -- err = pe.putString(host) -- if err != nil { -- return err -- } -- -- pe.putInt32(int32(port)) -- -- return nil --} -- --func (b *Broker) responseReceiver() { -- var dead error -- header := make([]byte, 8) -- for response := range b.responses { -- if dead != nil { -- response.errors <- dead -- continue -- } -- -- err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) -- if err != nil { -- dead = err -- response.errors <- err -- continue -- } -- -- bytesReadHeader, err := io.ReadFull(b.conn, header) -- requestLatency := time.Since(response.requestTime) -- if err != nil { -- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) -- dead = err -- response.errors <- err -- continue -- } -- -- decodedHeader := responseHeader{} -- err = decode(header, &decodedHeader) -- if err != nil { -- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) -- dead = err -- response.errors <- err -- continue -- } -- if decodedHeader.correlationID != response.correlationID { -- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) -- // TODO if decoded ID < cur ID, discard until we catch up -- // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response -- dead = PacketDecodingError{fmt.Sprintf(""correlation ID didn't match, wanted %d, got %d"", response.correlationID, decodedHeader.correlationID)} -- response.errors <- dead -- continue -- } -- -- buf := make([]byte, decodedHeader.length-4) -- 
bytesReadBody, err := io.ReadFull(b.conn, buf) -- b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) -- if err != nil { -- dead = err -- response.errors <- err -- continue -- } -- -- response.packets <- buf -- } -- close(b.done) --} -- --func (b *Broker) sendAndReceiveSASLPlainHandshake() error { -- rb := &SaslHandshakeRequest{""PLAIN""} -- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} -- buf, err := encode(req, b.conf.MetricRegistry) -- if err != nil { -- return err -- } -- -- err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) -- if err != nil { -- return err -- } -- -- requestTime := time.Now() -- bytes, err := b.conn.Write(buf) -- b.updateOutgoingCommunicationMetrics(bytes) -- if err != nil { -- Logger.Printf(""Failed to send SASL handshake %s: %s\n"", b.addr, err.Error()) -- return err -- } -- b.correlationID++ -- //wait for the response -- header := make([]byte, 8) // response header -- _, err = io.ReadFull(b.conn, header) -- if err != nil { -- Logger.Printf(""Failed to read SASL handshake header : %s\n"", err.Error()) -- return err -- } -- length := binary.BigEndian.Uint32(header[:4]) -- payload := make([]byte, length-4) -- n, err := io.ReadFull(b.conn, payload) -- if err != nil { -- Logger.Printf(""Failed to read SASL handshake payload : %s\n"", err.Error()) -- return err -- } -- b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) -- res := &SaslHandshakeResponse{} -- err = versionedDecode(payload, res, 0) -- if err != nil { -- Logger.Printf(""Failed to parse SASL handshake : %s\n"", err.Error()) -- return err -- } -- if res.Err != ErrNoError { -- Logger.Printf(""Invalid SASL Mechanism : %s\n"", res.Err.Error()) -- return res.Err -- } -- Logger.Print(""Successful SASL handshake"") -- return nil --} -- --// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) --// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 --// --// In SASL Plain, Kafka expects the auth header to be in the following format --// Message format (from https://tools.ietf.org/html/rfc4616): --// --// message = [authzid] UTF8NUL authcid UTF8NUL passwd --// authcid = 1*SAFE ; MUST accept up to 255 octets --// authzid = 1*SAFE ; MUST accept up to 255 octets --// passwd = 1*SAFE ; MUST accept up to 255 octets --// UTF8NUL = %x00 ; UTF-8 encoded NUL character --// --// SAFE = UTF1 / UTF2 / UTF3 / UTF4 --// ;; any UTF-8 encoded Unicode character except NUL --// --// When credentials are valid, Kafka returns a 4 byte array of null characters. --// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way --// of responding to bad credentials but thats how its being done today. 
--func (b *Broker) sendAndReceiveSASLPlainAuth() error { -- if b.conf.Net.SASL.Handshake { -- handshakeErr := b.sendAndReceiveSASLPlainHandshake() -- if handshakeErr != nil { -- Logger.Printf(""Error while performing SASL handshake %s\n"", b.addr) -- return handshakeErr -- } -- } -- length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) -- authBytes := make([]byte, length+4) //4 byte length header + auth data -- binary.BigEndian.PutUint32(authBytes, uint32(length)) -- copy(authBytes[4:], []byte(""\x00""+b.conf.Net.SASL.User+""\x00""+b.conf.Net.SASL.Password)) -- -- err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) -- if err != nil { -- Logger.Printf(""Failed to set write deadline when doing SASL auth with broker %s: %s\n"", b.addr, err.Error()) -- return err -- } -- -- requestTime := time.Now() -- bytesWritten, err := b.conn.Write(authBytes) -- b.updateOutgoingCommunicationMetrics(bytesWritten) -- if err != nil { -- Logger.Printf(""Failed to write SASL auth header to broker %s: %s\n"", b.addr, err.Error()) -- return err -- } -- -- header := make([]byte, 4) -- n, err := io.ReadFull(b.conn, header) -- b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) -- // If the credentials are valid, we would get a 4 byte response filled with null characters. -- // Otherwise, the broker closes the connection and we get an EOF -- if err != nil { -- Logger.Printf(""Failed to read response while authenticating with SASL to broker %s: %s\n"", b.addr, err.Error()) -- return err -- } -- -- Logger.Printf(""SASL authentication successful with broker %s:%v - %v\n"", b.addr, n, header) -- return nil --} -- --func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { -- b.updateRequestLatencyMetrics(requestLatency) -- b.responseRate.Mark(1) -- if b.brokerResponseRate != nil { -- b.brokerResponseRate.Mark(1) -- } -- responseSize := int64(bytes) -- b.incomingByteRate.Mark(responseSize) -- if b.brokerIncomingByteRate != nil { -- b.brokerIncomingByteRate.Mark(responseSize) -- } -- b.responseSize.Update(responseSize) -- if b.brokerResponseSize != nil { -- b.brokerResponseSize.Update(responseSize) -- } --} -- --func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { -- requestLatencyInMs := int64(requestLatency / time.Millisecond) -- b.requestLatency.Update(requestLatencyInMs) -- if b.brokerRequestLatency != nil { -- b.brokerRequestLatency.Update(requestLatencyInMs) -- } --} -- --func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { -- b.requestRate.Mark(1) -- if b.brokerRequestRate != nil { -- b.brokerRequestRate.Mark(1) -- } -- requestSize := int64(bytes) -- b.outgoingByteRate.Mark(requestSize) -- if b.brokerOutgoingByteRate != nil { -- b.brokerOutgoingByteRate.Mark(requestSize) -- } -- b.requestSize.Update(requestSize) -- if b.brokerRequestSize != nil { -- b.brokerRequestSize.Update(requestSize) -- } --} -diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go -deleted file mode 100644 -index 3dbfc4b06ffb1..0000000000000 ---- a/vendor/github.com/Shopify/sarama/client.go -+++ /dev/null -@@ -1,794 +0,0 @@ --package sarama -- --import ( -- ""math/rand"" -- ""sort"" -- ""sync"" -- ""time"" --) -- --// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. --// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected --// automatically when it passes out of scope. 
It is safe to share a client amongst many --// users, however Kafka will process requests from a single client strictly in serial, --// so it is generally more efficient to use the default one client per producer/consumer. --type Client interface { -- // Config returns the Config struct of the client. This struct should not be -- // altered after it has been created. -- Config() *Config -- -- // Brokers returns the current set of active brokers as retrieved from cluster metadata. -- Brokers() []*Broker -- -- // Topics returns the set of available topics as retrieved from cluster metadata. -- Topics() ([]string, error) -- -- // Partitions returns the sorted list of all partition IDs for the given topic. -- Partitions(topic string) ([]int32, error) -- -- // WritablePartitions returns the sorted list of all writable partition IDs for -- // the given topic, where ""writable"" means ""having a valid leader accepting -- // writes"". -- WritablePartitions(topic string) ([]int32, error) -- -- // Leader returns the broker object that is the leader of the current -- // topic/partition, as determined by querying the cluster metadata. -- Leader(topic string, partitionID int32) (*Broker, error) -- -- // Replicas returns the set of all replica IDs for the given partition. -- Replicas(topic string, partitionID int32) ([]int32, error) -- -- // InSyncReplicas returns the set of all in-sync replica IDs for the given -- // partition. In-sync replicas are replicas which are fully caught up with -- // the partition leader. -- InSyncReplicas(topic string, partitionID int32) ([]int32, error) -- -- // RefreshMetadata takes a list of topics and queries the cluster to refresh the -- // available metadata for those topics. If no topics are provided, it will refresh -- // metadata for all topics. -- RefreshMetadata(topics ...string) error -- -- // GetOffset queries the cluster to get the most recent available offset at the -- // given time (in milliseconds) on the topic/partition combination. -- // Time should be OffsetOldest for the earliest available offset, -- // OffsetNewest for the offset of the message that will be produced next, or a time. -- GetOffset(topic string, partitionID int32, time int64) (int64, error) -- -- // Coordinator returns the coordinating broker for a consumer group. It will -- // return a locally cached value if it's available. You can call -- // RefreshCoordinator to update the cached value. This function only works on -- // Kafka 0.8.2 and higher. -- Coordinator(consumerGroup string) (*Broker, error) -- -- // RefreshCoordinator retrieves the coordinator for a consumer group and stores it -- // in local cache. This function only works on Kafka 0.8.2 and higher. -- RefreshCoordinator(consumerGroup string) error -- -- // Close shuts down all broker connections managed by this client. It is required -- // to call this function before a client object passes out of scope, as it will -- // otherwise leak memory. You must close any Producers or Consumers using a client -- // before you close the client. -- Close() error -- -- // Closed returns true if the client has already had Close called on it -- Closed() bool --} -- --const ( -- // OffsetNewest stands for the log head offset, i.e. the offset that will be -- // assigned to the next message that will be produced to the partition. You -- // can send this to a client's GetOffset method to get this offset, or when -- // calling ConsumePartition to start consuming new messages. 
-- OffsetNewest int64 = -1 -- // OffsetOldest stands for the oldest offset available on the broker for a -- // partition. You can send this to a client's GetOffset method to get this -- // offset, or when calling ConsumePartition to start consuming from the -- // oldest offset that is still available on the broker. -- OffsetOldest int64 = -2 --) -- --type client struct { -- conf *Config -- closer, closed chan none // for shutting down background metadata updater -- -- // the broker addresses given to us through the constructor are not guaranteed to be returned in -- // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) -- // so we store them separately -- seedBrokers []*Broker -- deadSeeds []*Broker -- -- brokers map[int32]*Broker // maps broker ids to brokers -- metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata -- coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs -- -- // If the number of partitions is large, we can get some churn calling cachedPartitions, -- // so the result is cached. It is important to update this value whenever metadata is changed -- cachedPartitionsResults map[string][maxPartitionIndex][]int32 -- -- lock sync.RWMutex // protects access to the maps that hold cluster state. --} -- --// NewClient creates a new Client. It connects to one of the given broker addresses --// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot --// be retrieved from any of the given broker addresses, the client is not created. --func NewClient(addrs []string, conf *Config) (Client, error) { -- Logger.Println(""Initializing new client"") -- -- if conf == nil { -- conf = NewConfig() -- } -- -- if err := conf.Validate(); err != nil { -- return nil, err -- } -- -- if len(addrs) < 1 { -- return nil, ConfigurationError(""You must provide at least one broker address"") -- } -- -- client := &client{ -- conf: conf, -- closer: make(chan none), -- closed: make(chan none), -- brokers: make(map[int32]*Broker), -- metadata: make(map[string]map[int32]*PartitionMetadata), -- cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), -- coordinators: make(map[string]int32), -- } -- -- random := rand.New(rand.NewSource(time.Now().UnixNano())) -- for _, index := range random.Perm(len(addrs)) { -- client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) -- } -- -- if conf.Metadata.Full { -- // do an initial fetch of all cluster metadata by specifying an empty list of topics -- err := client.RefreshMetadata() -- switch err { -- case nil: -- break -- case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: -- // indicates that maybe part of the cluster is down, but is not fatal to creating the client -- Logger.Println(err) -- default: -- close(client.closed) // we haven't started the background updater yet, so we have to do this manually -- _ = client.Close() -- return nil, err -- } -- } -- go withRecover(client.backgroundMetadataUpdater) -- -- Logger.Println(""Successfully initialized new client"") -- -- return client, nil --} -- --func (client *client) Config() *Config { -- return client.conf --} -- --func (client *client) Brokers() []*Broker { -- client.lock.RLock() -- defer client.lock.RUnlock() -- brokers := make([]*Broker, 0) -- for _, broker := range client.brokers { -- brokers = append(brokers, broker) -- } -- return brokers --} -- --func (client 
*client) Close() error { -- if client.Closed() { -- // Chances are this is being called from a defer() and the error will go unobserved -- // so we go ahead and log the event in this case. -- Logger.Printf(""Close() called on already closed client"") -- return ErrClosedClient -- } -- -- // shutdown and wait for the background thread before we take the lock, to avoid races -- close(client.closer) -- <-client.closed -- -- client.lock.Lock() -- defer client.lock.Unlock() -- Logger.Println(""Closing Client"") -- -- for _, broker := range client.brokers { -- safeAsyncClose(broker) -- } -- -- for _, broker := range client.seedBrokers { -- safeAsyncClose(broker) -- } -- -- client.brokers = nil -- client.metadata = nil -- -- return nil --} -- --func (client *client) Closed() bool { -- return client.brokers == nil --} -- --func (client *client) Topics() ([]string, error) { -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- client.lock.RLock() -- defer client.lock.RUnlock() -- -- ret := make([]string, 0, len(client.metadata)) -- for topic := range client.metadata { -- ret = append(ret, topic) -- } -- -- return ret, nil --} -- --func (client *client) Partitions(topic string) ([]int32, error) { -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- partitions := client.cachedPartitions(topic, allPartitions) -- -- if len(partitions) == 0 { -- err := client.RefreshMetadata(topic) -- if err != nil { -- return nil, err -- } -- partitions = client.cachedPartitions(topic, allPartitions) -- } -- -- if partitions == nil { -- return nil, ErrUnknownTopicOrPartition -- } -- -- return partitions, nil --} -- --func (client *client) WritablePartitions(topic string) ([]int32, error) { -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- partitions := client.cachedPartitions(topic, writablePartitions) -- -- // len==0 catches when it's nil (no such topic) and the odd case when every single -- // partition is undergoing leader election simultaneously. Callers have to be able to handle -- // this function returning an empty slice (which is a valid return value) but catching it -- // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers -- // a metadata refresh as a nicety so callers can just try again and don't have to manually -- // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
-- if len(partitions) == 0 { -- err := client.RefreshMetadata(topic) -- if err != nil { -- return nil, err -- } -- partitions = client.cachedPartitions(topic, writablePartitions) -- } -- -- if partitions == nil { -- return nil, ErrUnknownTopicOrPartition -- } -- -- return partitions, nil --} -- --func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- metadata := client.cachedMetadata(topic, partitionID) -- -- if metadata == nil { -- err := client.RefreshMetadata(topic) -- if err != nil { -- return nil, err -- } -- metadata = client.cachedMetadata(topic, partitionID) -- } -- -- if metadata == nil { -- return nil, ErrUnknownTopicOrPartition -- } -- -- if metadata.Err == ErrReplicaNotAvailable { -- return dupInt32Slice(metadata.Replicas), metadata.Err -- } -- return dupInt32Slice(metadata.Replicas), nil --} -- --func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- metadata := client.cachedMetadata(topic, partitionID) -- -- if metadata == nil { -- err := client.RefreshMetadata(topic) -- if err != nil { -- return nil, err -- } -- metadata = client.cachedMetadata(topic, partitionID) -- } -- -- if metadata == nil { -- return nil, ErrUnknownTopicOrPartition -- } -- -- if metadata.Err == ErrReplicaNotAvailable { -- return dupInt32Slice(metadata.Isr), metadata.Err -- } -- return dupInt32Slice(metadata.Isr), nil --} -- --func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- leader, err := client.cachedLeader(topic, partitionID) -- -- if leader == nil { -- err = client.RefreshMetadata(topic) -- if err != nil { -- return nil, err -- } -- leader, err = client.cachedLeader(topic, partitionID) -- } -- -- return leader, err --} -- --func (client *client) RefreshMetadata(topics ...string) error { -- if client.Closed() { -- return ErrClosedClient -- } -- -- // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper -- // error. This handles the case by returning an error instead of sending it -- // off to Kafka. 
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 -- for _, topic := range topics { -- if len(topic) == 0 { -- return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return -- } -- } -- -- return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) --} -- --func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { -- if client.Closed() { -- return -1, ErrClosedClient -- } -- -- offset, err := client.getOffset(topic, partitionID, time) -- -- if err != nil { -- if err := client.RefreshMetadata(topic); err != nil { -- return -1, err -- } -- return client.getOffset(topic, partitionID, time) -- } -- -- return offset, err --} -- --func (client *client) Coordinator(consumerGroup string) (*Broker, error) { -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- coordinator := client.cachedCoordinator(consumerGroup) -- -- if coordinator == nil { -- if err := client.RefreshCoordinator(consumerGroup); err != nil { -- return nil, err -- } -- coordinator = client.cachedCoordinator(consumerGroup) -- } -- -- if coordinator == nil { -- return nil, ErrConsumerCoordinatorNotAvailable -- } -- -- _ = coordinator.Open(client.conf) -- return coordinator, nil --} -- --func (client *client) RefreshCoordinator(consumerGroup string) error { -- if client.Closed() { -- return ErrClosedClient -- } -- -- response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) -- if err != nil { -- return err -- } -- -- client.lock.Lock() -- defer client.lock.Unlock() -- client.registerBroker(response.Coordinator) -- client.coordinators[consumerGroup] = response.Coordinator.ID() -- return nil --} -- --// private broker management helpers -- --// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered --// in the brokers map. It returns the broker that is registered, which may be the provided broker, --// or a previously registered Broker instance. You must hold the write lock before calling this function. --func (client *client) registerBroker(broker *Broker) { -- if client.brokers[broker.ID()] == nil { -- client.brokers[broker.ID()] = broker -- Logger.Printf(""client/brokers registered new broker #%d at %s"", broker.ID(), broker.Addr()) -- } else if broker.Addr() != client.brokers[broker.ID()].Addr() { -- safeAsyncClose(client.brokers[broker.ID()]) -- client.brokers[broker.ID()] = broker -- Logger.Printf(""client/brokers replaced registered broker #%d with %s"", broker.ID(), broker.Addr()) -- } --} -- --// deregisterBroker removes a broker from the seedsBroker list, and if it's --// not the seedbroker, removes it from brokers map completely. 
--func (client *client) deregisterBroker(broker *Broker) { -- client.lock.Lock() -- defer client.lock.Unlock() -- -- if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { -- client.deadSeeds = append(client.deadSeeds, broker) -- client.seedBrokers = client.seedBrokers[1:] -- } else { -- // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, -- // but we really shouldn't have to; once that loop is made better this case can be -- // removed, and the function generally can be renamed from `deregisterBroker` to -- // `nextSeedBroker` or something -- Logger.Printf(""client/brokers deregistered broker #%d at %s"", broker.ID(), broker.Addr()) -- delete(client.brokers, broker.ID()) -- } --} -- --func (client *client) resurrectDeadBrokers() { -- client.lock.Lock() -- defer client.lock.Unlock() -- -- Logger.Printf(""client/brokers resurrecting %d dead seed brokers"", len(client.deadSeeds)) -- client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) -- client.deadSeeds = nil --} -- --func (client *client) any() *Broker { -- client.lock.RLock() -- defer client.lock.RUnlock() -- -- if len(client.seedBrokers) > 0 { -- _ = client.seedBrokers[0].Open(client.conf) -- return client.seedBrokers[0] -- } -- -- // not guaranteed to be random *or* deterministic -- for _, broker := range client.brokers { -- _ = broker.Open(client.conf) -- return broker -- } -- -- return nil --} -- --// private caching/lazy metadata helpers -- --type partitionType int -- --const ( -- allPartitions partitionType = iota -- writablePartitions -- // If you add any more types, update the partition cache in update() -- -- // Ensure this is the last partition type value -- maxPartitionIndex --) -- --func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { -- client.lock.RLock() -- defer client.lock.RUnlock() -- -- partitions := client.metadata[topic] -- if partitions != nil { -- return partitions[partitionID] -- } -- -- return nil --} -- --func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { -- client.lock.RLock() -- defer client.lock.RUnlock() -- -- partitions, exists := client.cachedPartitionsResults[topic] -- -- if !exists { -- return nil -- } -- return partitions[partitionSet] --} -- --func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { -- partitions := client.metadata[topic] -- -- if partitions == nil { -- return nil -- } -- -- ret := make([]int32, 0, len(partitions)) -- for _, partition := range partitions { -- if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { -- continue -- } -- ret = append(ret, partition.ID) -- } -- -- sort.Sort(int32Slice(ret)) -- return ret --} -- --func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { -- client.lock.RLock() -- defer client.lock.RUnlock() -- -- partitions := client.metadata[topic] -- if partitions != nil { -- metadata, ok := partitions[partitionID] -- if ok { -- if metadata.Err == ErrLeaderNotAvailable { -- return nil, ErrLeaderNotAvailable -- } -- b := client.brokers[metadata.Leader] -- if b == nil { -- return nil, ErrLeaderNotAvailable -- } -- _ = b.Open(client.conf) -- return b, nil -- } -- } -- -- return nil, ErrUnknownTopicOrPartition --} -- --func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { -- broker, err := client.Leader(topic, partitionID) -- if err != nil { -- return -1, err -- } -- -- request := &OffsetRequest{} -- if 
client.conf.Version.IsAtLeast(V0_10_1_0) { -- request.Version = 1 -- } -- request.AddBlock(topic, partitionID, time, 1) -- -- response, err := broker.GetAvailableOffsets(request) -- if err != nil { -- _ = broker.Close() -- return -1, err -- } -- -- block := response.GetBlock(topic, partitionID) -- if block == nil { -- _ = broker.Close() -- return -1, ErrIncompleteResponse -- } -- if block.Err != ErrNoError { -- return -1, block.Err -- } -- if len(block.Offsets) != 1 { -- return -1, ErrOffsetOutOfRange -- } -- -- return block.Offsets[0], nil --} -- --// core metadata update logic -- --func (client *client) backgroundMetadataUpdater() { -- defer close(client.closed) -- -- if client.conf.Metadata.RefreshFrequency == time.Duration(0) { -- return -- } -- -- ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) -- defer ticker.Stop() -- -- for { -- select { -- case <-ticker.C: -- topics := []string{} -- if !client.conf.Metadata.Full { -- if specificTopics, err := client.Topics(); err != nil { -- Logger.Println(""Client background metadata topic load:"", err) -- break -- } else if len(specificTopics) == 0 { -- Logger.Println(""Client background metadata update: no specific topics to update"") -- break -- } else { -- topics = specificTopics -- } -- } -- -- if err := client.RefreshMetadata(topics...); err != nil { -- Logger.Println(""Client background metadata update:"", err) -- } -- case <-client.closer: -- return -- } -- } --} -- --func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { -- retry := func(err error) error { -- if attemptsRemaining > 0 { -- Logger.Printf(""client/metadata retrying after %dms... (%d attempts remaining)\n"", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) -- time.Sleep(client.conf.Metadata.Retry.Backoff) -- return client.tryRefreshMetadata(topics, attemptsRemaining-1) -- } -- return err -- } -- -- for broker := client.any(); broker != nil; broker = client.any() { -- if len(topics) > 0 { -- Logger.Printf(""client/metadata fetching metadata for %v from broker %s\n"", topics, broker.addr) -- } else { -- Logger.Printf(""client/metadata fetching metadata for all topics from broker %s\n"", broker.addr) -- } -- response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) -- -- switch err.(type) { -- case nil: -- // valid response, use it -- shouldRetry, err := client.updateMetadata(response) -- if shouldRetry { -- Logger.Println(""client/metadata found some partitions to be leaderless"") -- return retry(err) // note: err can be nil -- } -- return err -- -- case PacketEncodingError: -- // didn't even send, return the error -- return err -- default: -- // some other error, remove that broker and try again -- Logger.Println(""client/metadata got error from broker while fetching metadata:"", err) -- _ = broker.Close() -- client.deregisterBroker(broker) -- } -- } -- -- Logger.Println(""client/metadata no available broker to send metadata request to"") -- client.resurrectDeadBrokers() -- return retry(ErrOutOfBrokers) --} -- --// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable --func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { -- client.lock.Lock() -- defer client.lock.Unlock() -- -- // For all the brokers we received: -- // - if it is a new ID, save it -- // - if it is an existing ID, but the address we have is stale, discard the old one and save it -- // - otherwise ignore it, replacing our existing one would just bounce the 
connection -- for _, broker := range data.Brokers { -- client.registerBroker(broker) -- } -- -- for _, topic := range data.Topics { -- delete(client.metadata, topic.Name) -- delete(client.cachedPartitionsResults, topic.Name) -- -- switch topic.Err { -- case ErrNoError: -- break -- case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results -- err = topic.Err -- continue -- case ErrUnknownTopicOrPartition: // retry, do not store partial partition results -- err = topic.Err -- retry = true -- continue -- case ErrLeaderNotAvailable: // retry, but store partial partition results -- retry = true -- break -- default: // don't retry, don't store partial results -- Logger.Printf(""Unexpected topic-level metadata error: %s"", topic.Err) -- err = topic.Err -- continue -- } -- -- client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) -- for _, partition := range topic.Partitions { -- client.metadata[topic.Name][partition.ID] = partition -- if partition.Err == ErrLeaderNotAvailable { -- retry = true -- } -- } -- -- var partitionCache [maxPartitionIndex][]int32 -- partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) -- partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) -- client.cachedPartitionsResults[topic.Name] = partitionCache -- } -- -- return --} -- --func (client *client) cachedCoordinator(consumerGroup string) *Broker { -- client.lock.RLock() -- defer client.lock.RUnlock() -- if coordinatorID, ok := client.coordinators[consumerGroup]; ok { -- return client.brokers[coordinatorID] -- } -- return nil --} -- --func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) { -- retry := func(err error) (*ConsumerMetadataResponse, error) { -- if attemptsRemaining > 0 { -- Logger.Printf(""client/coordinator retrying after %dms... (%d attempts remaining)\n"", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) -- time.Sleep(client.conf.Metadata.Retry.Backoff) -- return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) -- } -- return nil, err -- } -- -- for broker := client.any(); broker != nil; broker = client.any() { -- Logger.Printf(""client/coordinator requesting coordinator for consumergroup %s from %s\n"", consumerGroup, broker.Addr()) -- -- request := new(ConsumerMetadataRequest) -- request.ConsumerGroup = consumerGroup -- -- response, err := broker.GetConsumerMetadata(request) -- -- if err != nil { -- Logger.Printf(""client/coordinator request to broker %s failed: %s\n"", broker.Addr(), err) -- -- switch err.(type) { -- case PacketEncodingError: -- return nil, err -- default: -- _ = broker.Close() -- client.deregisterBroker(broker) -- continue -- } -- } -- -- switch response.Err { -- case ErrNoError: -- Logger.Printf(""client/coordinator coordinator for consumergroup %s is #%d (%s)\n"", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) -- return response, nil -- -- case ErrConsumerCoordinatorNotAvailable: -- Logger.Printf(""client/coordinator coordinator for consumer group %s is not available\n"", consumerGroup) -- -- // This is very ugly, but this scenario will only happen once per cluster. -- // The __consumer_offsets topic only has to be created one time. -- // The number of partitions not configurable, but partition 0 should always exist. 
-- if _, err := client.Leader(""__consumer_offsets"", 0); err != nil { -- Logger.Printf(""client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n"") -- time.Sleep(2 * time.Second) -- } -- -- return retry(ErrConsumerCoordinatorNotAvailable) -- default: -- return nil, response.Err -- } -- } -- -- Logger.Println(""client/coordinator no available broker to send consumer metadata request to"") -- client.resurrectDeadBrokers() -- return retry(ErrOutOfBrokers) --} -diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go -deleted file mode 100644 -index 29ea5c2b36e03..0000000000000 ---- a/vendor/github.com/Shopify/sarama/config.go -+++ /dev/null -@@ -1,442 +0,0 @@ --package sarama -- --import ( -- ""crypto/tls"" -- ""regexp"" -- ""time"" -- -- ""github.com/rcrowley/go-metrics"" --) -- --const defaultClientID = ""sarama"" -- --var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) -- --// Config is used to pass multiple configuration options to Sarama's constructors. --type Config struct { -- // Net is the namespace for network-level properties used by the Broker, and -- // shared by the Client/Producer/Consumer. -- Net struct { -- // How many outstanding requests a connection is allowed to have before -- // sending on it blocks (default 5). -- MaxOpenRequests int -- -- // All three of the below configurations are similar to the -- // `socket.timeout.ms` setting in JVM kafka. All of them default -- // to 30 seconds. -- DialTimeout time.Duration // How long to wait for the initial connection. -- ReadTimeout time.Duration // How long to wait for a response. -- WriteTimeout time.Duration // How long to wait for a transmit. -- -- TLS struct { -- // Whether or not to use TLS when connecting to the broker -- // (defaults to false). -- Enable bool -- // The TLS configuration to use for secure connections if -- // enabled (defaults to nil). -- Config *tls.Config -- } -- -- // SASL based authentication with broker. While there are multiple SASL authentication methods -- // the current implementation is limited to plaintext (SASL/PLAIN) authentication -- SASL struct { -- // Whether or not to use SASL authentication when connecting to the broker -- // (defaults to false). -- Enable bool -- // Whether or not to send the Kafka SASL handshake first if enabled -- // (defaults to true). You should only set this to false if you're using -- // a non-Kafka SASL proxy. -- Handshake bool -- //username and password for SASL/PLAIN authentication -- User string -- Password string -- } -- -- // KeepAlive specifies the keep-alive period for an active network connection. -- // If zero, keep-alives are disabled. (default is 0: disabled). -- KeepAlive time.Duration -- } -- -- // Metadata is the namespace for metadata management properties used by the -- // Client, and shared by the Producer/Consumer. -- Metadata struct { -- Retry struct { -- // The total number of times to retry a metadata request when the -- // cluster is in the middle of a leader election (default 3). -- Max int -- // How long to wait for leader election to occur before retrying -- // (default 250ms). Similar to the JVM's `retry.backoff.ms`. -- Backoff time.Duration -- } -- // How frequently to refresh the cluster metadata in the background. -- // Defaults to 10 minutes. Set to 0 to disable. Similar to -- // `topic.metadata.refresh.interval.ms` in the JVM version. 
-- RefreshFrequency time.Duration -- -- // Whether to maintain a full set of metadata for all topics, or just -- // the minimal set that has been necessary so far. The full set is simpler -- // and usually more convenient, but can take up a substantial amount of -- // memory if you have many topics and partitions. Defaults to true. -- Full bool -- } -- -- // Producer is the namespace for configuration related to producing messages, -- // used by the Producer. -- Producer struct { -- // The maximum permitted size of a message (defaults to 1000000). Should be -- // set equal to or smaller than the broker's `message.max.bytes`. -- MaxMessageBytes int -- // The level of acknowledgement reliability needed from the broker (defaults -- // to WaitForLocal). Equivalent to the `request.required.acks` setting of the -- // JVM producer. -- RequiredAcks RequiredAcks -- // The maximum duration the broker will wait the receipt of the number of -- // RequiredAcks (defaults to 10 seconds). This is only relevant when -- // RequiredAcks is set to WaitForAll or a number > 1. Only supports -- // millisecond resolution, nanoseconds will be truncated. Equivalent to -- // the JVM producer's `request.timeout.ms` setting. -- Timeout time.Duration -- // The type of compression to use on messages (defaults to no compression). -- // Similar to `compression.codec` setting of the JVM producer. -- Compression CompressionCodec -- // Generates partitioners for choosing the partition to send messages to -- // (defaults to hashing the message key). Similar to the `partitioner.class` -- // setting for the JVM producer. -- Partitioner PartitionerConstructor -- -- // Return specifies what channels will be populated. If they are set to true, -- // you must read from the respective channels to prevent deadlock. If, -- // however, this config is used to create a `SyncProducer`, both must be set -- // to true and you shall not read from the channels since the producer does -- // this internally. -- Return struct { -- // If enabled, successfully delivered messages will be returned on the -- // Successes channel (default disabled). -- Successes bool -- -- // If enabled, messages that failed to deliver will be returned on the -- // Errors channel, including error (default enabled). -- Errors bool -- } -- -- // The following config options control how often messages are batched up and -- // sent to the broker. By default, messages are sent as fast as possible, and -- // all messages received while the current batch is in-flight are placed -- // into the subsequent batch. -- Flush struct { -- // The best-effort number of bytes needed to trigger a flush. Use the -- // global sarama.MaxRequestSize to set a hard upper limit. -- Bytes int -- // The best-effort number of messages needed to trigger a flush. Use -- // `MaxMessages` to set a hard upper limit. -- Messages int -- // The best-effort frequency of flushes. Equivalent to -- // `queue.buffering.max.ms` setting of JVM producer. -- Frequency time.Duration -- // The maximum number of messages the producer will send in a single -- // broker request. Defaults to 0 for unlimited. Similar to -- // `queue.buffering.max.messages` in the JVM producer. -- MaxMessages int -- } -- -- Retry struct { -- // The total number of times to retry sending a message (default 3). -- // Similar to the `message.send.max.retries` setting of the JVM producer. -- Max int -- // How long to wait for the cluster to settle between retries -- // (default 100ms). 
Similar to the `retry.backoff.ms` setting of the -- // JVM producer. -- Backoff time.Duration -- } -- } -- -- // Consumer is the namespace for configuration related to consuming messages, -- // used by the Consumer. -- // -- // Note that Sarama's Consumer type does not currently support automatic -- // consumer-group rebalancing and offset tracking. For Zookeeper-based -- // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka -- // library builds on Sarama to add this support. For Kafka-based tracking -- // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library -- // builds on Sarama to add this support. -- Consumer struct { -- Retry struct { -- // How long to wait after a failing to read from a partition before -- // trying again (default 2s). -- Backoff time.Duration -- } -- -- // Fetch is the namespace for controlling how many bytes are retrieved by any -- // given request. -- Fetch struct { -- // The minimum number of message bytes to fetch in a request - the broker -- // will wait until at least this many are available. The default is 1, -- // as 0 causes the consumer to spin when no messages are available. -- // Equivalent to the JVM's `fetch.min.bytes`. -- Min int32 -- // The default number of message bytes to fetch from the broker in each -- // request (default 1MB). This should be larger than the majority of -- // your messages, or else the consumer will spend a lot of time -- // negotiating sizes and not actually consuming. Similar to the JVM's -- // `fetch.message.max.bytes`. -- Default int32 -- // The maximum number of message bytes to fetch from the broker in a -- // single request. Messages larger than this will return -- // ErrMessageTooLarge and will not be consumable, so you must be sure -- // this is at least as large as your largest message. Defaults to 0 -- // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The -- // global `sarama.MaxResponseSize` still applies. -- Max int32 -- } -- // The maximum amount of time the broker will wait for Consumer.Fetch.Min -- // bytes to become available before it returns fewer than that anyways. The -- // default is 250ms, since 0 causes the consumer to spin when no events are -- // available. 100-500ms is a reasonable range for most cases. Kafka only -- // supports precision up to milliseconds; nanoseconds will be truncated. -- // Equivalent to the JVM's `fetch.wait.max.ms`. -- MaxWaitTime time.Duration -- -- // The maximum amount of time the consumer expects a message takes to -- // process for the user. If writing to the Messages channel takes longer -- // than this, that partition will stop fetching more messages until it -- // can proceed again. -- // Note that, since the Messages channel is buffered, the actual grace time is -- // (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms. -- // If a message is not written to the Messages channel between two ticks -- // of the expiryTicker then a timeout is detected. -- // Using a ticker instead of a timer to detect timeouts should typically -- // result in many fewer calls to Timer functions which may result in a -- // significant performance improvement if many messages are being sent -- // and timeouts are infrequent. -- // The disadvantage of using a ticker instead of a timer is that -- // timeouts will be less accurate. That is, the effective timeout could -- // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. 
For -- // example, if `MaxProcessingTime` is 100ms then a delay of 180ms -- // between two messages being sent may not be recognized as a timeout. -- MaxProcessingTime time.Duration -- -- // Return specifies what channels will be populated. If they are set to true, -- // you must read from them to prevent deadlock. -- Return struct { -- // If enabled, any errors that occurred while consuming are returned on -- // the Errors channel (default disabled). -- Errors bool -- } -- -- // Offsets specifies configuration for how and when to commit consumed -- // offsets. This currently requires the manual use of an OffsetManager -- // but will eventually be automated. -- Offsets struct { -- // How frequently to commit updated offsets. Defaults to 1s. -- CommitInterval time.Duration -- -- // The initial offset to use if no offset was previously committed. -- // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. -- Initial int64 -- -- // The retention duration for committed offsets. If zero, disabled -- // (in which case the `offsets.retention.minutes` option on the -- // broker will be used). Kafka only supports precision up to -- // milliseconds; nanoseconds will be truncated. Requires Kafka -- // broker version 0.9.0 or later. -- // (default is 0: disabled). -- Retention time.Duration -- } -- } -- -- // A user-provided string sent with every request to the brokers for logging, -- // debugging, and auditing purposes. Defaults to ""sarama"", but you should -- // probably set it to something specific to your application. -- ClientID string -- // The number of events to buffer in internal and external channels. This -- // permits the producer and consumer to continue processing some messages -- // in the background while user code is working, greatly improving throughput. -- // Defaults to 256. -- ChannelBufferSize int -- // The version of Kafka that Sarama will assume it is running against. -- // Defaults to the oldest supported stable version. Since Kafka provides -- // backwards-compatibility, setting it to a version older than you have -- // will not break anything, although it may prevent you from using the -- // latest features. Setting it to a version greater than you are actually -- // running may lead to random breakage. -- Version KafkaVersion -- // The registry to define metrics into. -- // Defaults to a local registry. -- // If you want to disable metrics gathering, set ""metrics.UseNilMetrics"" to ""true"" -- // prior to starting Sarama. -- // See Examples on how to use the metrics registry -- MetricRegistry metrics.Registry --} -- --// NewConfig returns a new configuration instance with sane defaults. 
--func NewConfig() *Config { -- c := &Config{} -- -- c.Net.MaxOpenRequests = 5 -- c.Net.DialTimeout = 30 * time.Second -- c.Net.ReadTimeout = 30 * time.Second -- c.Net.WriteTimeout = 30 * time.Second -- c.Net.SASL.Handshake = true -- -- c.Metadata.Retry.Max = 3 -- c.Metadata.Retry.Backoff = 250 * time.Millisecond -- c.Metadata.RefreshFrequency = 10 * time.Minute -- c.Metadata.Full = true -- -- c.Producer.MaxMessageBytes = 1000000 -- c.Producer.RequiredAcks = WaitForLocal -- c.Producer.Timeout = 10 * time.Second -- c.Producer.Partitioner = NewHashPartitioner -- c.Producer.Retry.Max = 3 -- c.Producer.Retry.Backoff = 100 * time.Millisecond -- c.Producer.Return.Errors = true -- -- c.Consumer.Fetch.Min = 1 -- c.Consumer.Fetch.Default = 1024 * 1024 -- c.Consumer.Retry.Backoff = 2 * time.Second -- c.Consumer.MaxWaitTime = 250 * time.Millisecond -- c.Consumer.MaxProcessingTime = 100 * time.Millisecond -- c.Consumer.Return.Errors = false -- c.Consumer.Offsets.CommitInterval = 1 * time.Second -- c.Consumer.Offsets.Initial = OffsetNewest -- -- c.ClientID = defaultClientID -- c.ChannelBufferSize = 256 -- c.Version = minVersion -- c.MetricRegistry = metrics.NewRegistry() -- -- return c --} -- --// Validate checks a Config instance. It will return a --// ConfigurationError if the specified values don't make sense. --func (c *Config) Validate() error { -- // some configuration values should be warned on but not fail completely, do those first -- if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { -- Logger.Println(""Net.TLS is disabled but a non-nil configuration was provided."") -- } -- if c.Net.SASL.Enable == false { -- if c.Net.SASL.User != """" { -- Logger.Println(""Net.SASL is disabled but a non-empty username was provided."") -- } -- if c.Net.SASL.Password != """" { -- Logger.Println(""Net.SASL is disabled but a non-empty password was provided."") -- } -- } -- if c.Producer.RequiredAcks > 1 { -- Logger.Println(""Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0."") -- } -- if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { -- Logger.Println(""Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored."") -- } -- if c.Producer.Flush.Bytes >= int(MaxRequestSize) { -- Logger.Println(""Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored."") -- } -- if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { -- Logger.Println(""Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed."") -- } -- if c.Producer.Timeout%time.Millisecond != 0 { -- Logger.Println(""Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated."") -- } -- if c.Consumer.MaxWaitTime < 100*time.Millisecond { -- Logger.Println(""Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. 
See documentation for details."") -- } -- if c.Consumer.MaxWaitTime%time.Millisecond != 0 { -- Logger.Println(""Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated."") -- } -- if c.Consumer.Offsets.Retention%time.Millisecond != 0 { -- Logger.Println(""Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated."") -- } -- if c.ClientID == defaultClientID { -- Logger.Println(""ClientID is the default of 'sarama', you should consider setting it to something application-specific."") -- } -- -- // validate Net values -- switch { -- case c.Net.MaxOpenRequests <= 0: -- return ConfigurationError(""Net.MaxOpenRequests must be > 0"") -- case c.Net.DialTimeout <= 0: -- return ConfigurationError(""Net.DialTimeout must be > 0"") -- case c.Net.ReadTimeout <= 0: -- return ConfigurationError(""Net.ReadTimeout must be > 0"") -- case c.Net.WriteTimeout <= 0: -- return ConfigurationError(""Net.WriteTimeout must be > 0"") -- case c.Net.KeepAlive < 0: -- return ConfigurationError(""Net.KeepAlive must be >= 0"") -- case c.Net.SASL.Enable == true && c.Net.SASL.User == """": -- return ConfigurationError(""Net.SASL.User must not be empty when SASL is enabled"") -- case c.Net.SASL.Enable == true && c.Net.SASL.Password == """": -- return ConfigurationError(""Net.SASL.Password must not be empty when SASL is enabled"") -- } -- -- // validate the Metadata values -- switch { -- case c.Metadata.Retry.Max < 0: -- return ConfigurationError(""Metadata.Retry.Max must be >= 0"") -- case c.Metadata.Retry.Backoff < 0: -- return ConfigurationError(""Metadata.Retry.Backoff must be >= 0"") -- case c.Metadata.RefreshFrequency < 0: -- return ConfigurationError(""Metadata.RefreshFrequency must be >= 0"") -- } -- -- // validate the Producer values -- switch { -- case c.Producer.MaxMessageBytes <= 0: -- return ConfigurationError(""Producer.MaxMessageBytes must be > 0"") -- case c.Producer.RequiredAcks < -1: -- return ConfigurationError(""Producer.RequiredAcks must be >= -1"") -- case c.Producer.Timeout <= 0: -- return ConfigurationError(""Producer.Timeout must be > 0"") -- case c.Producer.Partitioner == nil: -- return ConfigurationError(""Producer.Partitioner must not be nil"") -- case c.Producer.Flush.Bytes < 0: -- return ConfigurationError(""Producer.Flush.Bytes must be >= 0"") -- case c.Producer.Flush.Messages < 0: -- return ConfigurationError(""Producer.Flush.Messages must be >= 0"") -- case c.Producer.Flush.Frequency < 0: -- return ConfigurationError(""Producer.Flush.Frequency must be >= 0"") -- case c.Producer.Flush.MaxMessages < 0: -- return ConfigurationError(""Producer.Flush.MaxMessages must be >= 0"") -- case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: -- return ConfigurationError(""Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set"") -- case c.Producer.Retry.Max < 0: -- return ConfigurationError(""Producer.Retry.Max must be >= 0"") -- case c.Producer.Retry.Backoff < 0: -- return ConfigurationError(""Producer.Retry.Backoff must be >= 0"") -- } -- -- if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { -- return ConfigurationError(""lz4 compression requires Version >= V0_10_0_0"") -- } -- -- // validate the Consumer values -- switch { -- case c.Consumer.Fetch.Min <= 0: -- return ConfigurationError(""Consumer.Fetch.Min must be > 0"") -- case c.Consumer.Fetch.Default <= 0: -- return ConfigurationError(""Consumer.Fetch.Default must be > 0"") -- case 
c.Consumer.Fetch.Max < 0: -- return ConfigurationError(""Consumer.Fetch.Max must be >= 0"") -- case c.Consumer.MaxWaitTime < 1*time.Millisecond: -- return ConfigurationError(""Consumer.MaxWaitTime must be >= 1ms"") -- case c.Consumer.MaxProcessingTime <= 0: -- return ConfigurationError(""Consumer.MaxProcessingTime must be > 0"") -- case c.Consumer.Retry.Backoff < 0: -- return ConfigurationError(""Consumer.Retry.Backoff must be >= 0"") -- case c.Consumer.Offsets.CommitInterval <= 0: -- return ConfigurationError(""Consumer.Offsets.CommitInterval must be > 0"") -- case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: -- return ConfigurationError(""Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest"") -- -- } -- -- // validate misc shared values -- switch { -- case c.ChannelBufferSize < 0: -- return ConfigurationError(""ChannelBufferSize must be >= 0"") -- case !validID.MatchString(c.ClientID): -- return ConfigurationError(""ClientID is invalid"") -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go -deleted file mode 100644 -index 848cc9c90c538..0000000000000 ---- a/vendor/github.com/Shopify/sarama/config_resource_type.go -+++ /dev/null -@@ -1,15 +0,0 @@ --package sarama -- --type ConfigResourceType int8 -- --// Taken from : --// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes -- --const ( -- UnknownResource ConfigResourceType = 0 -- AnyResource ConfigResourceType = 1 -- TopicResource ConfigResourceType = 2 -- GroupResource ConfigResourceType = 3 -- ClusterResource ConfigResourceType = 4 -- BrokerResource ConfigResourceType = 5 --) -diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go -deleted file mode 100644 -index 48d231cf9844e..0000000000000 ---- a/vendor/github.com/Shopify/sarama/consumer.go -+++ /dev/null -@@ -1,833 +0,0 @@ --package sarama -- --import ( -- ""errors"" -- ""fmt"" -- ""sync"" -- ""sync/atomic"" -- ""time"" --) -- --// ConsumerMessage encapsulates a Kafka message returned by the consumer. --type ConsumerMessage struct { -- Key, Value []byte -- Topic string -- Partition int32 -- Offset int64 -- Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp -- BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp -- Headers []*RecordHeader // only set if kafka is version 0.11+ --} -- --// ConsumerError is what is provided to the user when an error occurs. --// It wraps an error and includes the topic and partition. --type ConsumerError struct { -- Topic string -- Partition int32 -- Err error --} -- --func (ce ConsumerError) Error() string { -- return fmt.Sprintf(""kafka: error while consuming %s/%d: %s"", ce.Topic, ce.Partition, ce.Err) --} -- --// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. --// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors --// when stopping. --type ConsumerErrors []*ConsumerError -- --func (ce ConsumerErrors) Error() string { -- return fmt.Sprintf(""kafka: %d errors while consuming"", len(ce)) --} -- --// Consumer manages PartitionConsumers which process Kafka messages from brokers. 
You MUST call Close() --// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of --// scope. --// --// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. --// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library --// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the --// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. --type Consumer interface { -- -- // Topics returns the set of available topics as retrieved from the cluster -- // metadata. This method is the same as Client.Topics(), and is provided for -- // convenience. -- Topics() ([]string, error) -- -- // Partitions returns the sorted list of all partition IDs for the given topic. -- // This method is the same as Client.Partitions(), and is provided for convenience. -- Partitions(topic string) ([]int32, error) -- -- // ConsumePartition creates a PartitionConsumer on the given topic/partition with -- // the given offset. It will return an error if this Consumer is already consuming -- // on the given topic/partition. Offset can be a literal offset, or OffsetNewest -- // or OffsetOldest -- ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) -- -- // HighWaterMarks returns the current high water marks for each topic and partition. -- // Consistency between partitions is not guaranteed since high water marks are updated separately. -- HighWaterMarks() map[string]map[int32]int64 -- -- // Close shuts down the consumer. It must be called after all child -- // PartitionConsumers have already been closed. -- Close() error --} -- --type consumer struct { -- client Client -- conf *Config -- ownClient bool -- -- lock sync.Mutex -- children map[string]map[int32]*partitionConsumer -- brokerConsumers map[*Broker]*brokerConsumer --} -- --// NewConsumer creates a new consumer using the given broker addresses and configuration. --func NewConsumer(addrs []string, config *Config) (Consumer, error) { -- client, err := NewClient(addrs, config) -- if err != nil { -- return nil, err -- } -- -- c, err := NewConsumerFromClient(client) -- if err != nil { -- return nil, err -- } -- c.(*consumer).ownClient = true -- return c, nil --} -- --// NewConsumerFromClient creates a new consumer using the given client. It is still --// necessary to call Close() on the underlying client when shutting down this consumer. 
--func NewConsumerFromClient(client Client) (Consumer, error) { -- // Check that we are not dealing with a closed Client before processing any other arguments -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- c := &consumer{ -- client: client, -- conf: client.Config(), -- children: make(map[string]map[int32]*partitionConsumer), -- brokerConsumers: make(map[*Broker]*brokerConsumer), -- } -- -- return c, nil --} -- --func (c *consumer) Close() error { -- if c.ownClient { -- return c.client.Close() -- } -- return nil --} -- --func (c *consumer) Topics() ([]string, error) { -- return c.client.Topics() --} -- --func (c *consumer) Partitions(topic string) ([]int32, error) { -- return c.client.Partitions(topic) --} -- --func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { -- child := &partitionConsumer{ -- consumer: c, -- conf: c.conf, -- topic: topic, -- partition: partition, -- messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), -- errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), -- feeder: make(chan *FetchResponse, 1), -- trigger: make(chan none, 1), -- dying: make(chan none), -- fetchSize: c.conf.Consumer.Fetch.Default, -- } -- -- if err := child.chooseStartingOffset(offset); err != nil { -- return nil, err -- } -- -- var leader *Broker -- var err error -- if leader, err = c.client.Leader(child.topic, child.partition); err != nil { -- return nil, err -- } -- -- if err := c.addChild(child); err != nil { -- return nil, err -- } -- -- go withRecover(child.dispatcher) -- go withRecover(child.responseFeeder) -- -- child.broker = c.refBrokerConsumer(leader) -- child.broker.input <- child -- -- return child, nil --} -- --func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { -- c.lock.Lock() -- defer c.lock.Unlock() -- -- hwms := make(map[string]map[int32]int64) -- for topic, p := range c.children { -- hwm := make(map[int32]int64, len(p)) -- for partition, pc := range p { -- hwm[partition] = pc.HighWaterMarkOffset() -- } -- hwms[topic] = hwm -- } -- -- return hwms --} -- --func (c *consumer) addChild(child *partitionConsumer) error { -- c.lock.Lock() -- defer c.lock.Unlock() -- -- topicChildren := c.children[child.topic] -- if topicChildren == nil { -- topicChildren = make(map[int32]*partitionConsumer) -- c.children[child.topic] = topicChildren -- } -- -- if topicChildren[child.partition] != nil { -- return ConfigurationError(""That topic/partition is already being consumed"") -- } -- -- topicChildren[child.partition] = child -- return nil --} -- --func (c *consumer) removeChild(child *partitionConsumer) { -- c.lock.Lock() -- defer c.lock.Unlock() -- -- delete(c.children[child.topic], child.partition) --} -- --func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { -- c.lock.Lock() -- defer c.lock.Unlock() -- -- bc := c.brokerConsumers[broker] -- if bc == nil { -- bc = c.newBrokerConsumer(broker) -- c.brokerConsumers[broker] = bc -- } -- -- bc.refs++ -- -- return bc --} -- --func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { -- c.lock.Lock() -- defer c.lock.Unlock() -- -- brokerWorker.refs-- -- -- if brokerWorker.refs == 0 { -- close(brokerWorker.input) -- if c.brokerConsumers[brokerWorker.broker] == brokerWorker { -- delete(c.brokerConsumers, brokerWorker.broker) -- } -- } --} -- --func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { -- c.lock.Lock() -- defer c.lock.Unlock() -- -- delete(c.brokerConsumers, brokerWorker.broker) --} -- --// 
PartitionConsumer -- --// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or --// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out --// of scope. --// --// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range --// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported --// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, --// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. --// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set --// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement --// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. --// --// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of --// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process --// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call --// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will --// also drain the Messages channel, harvest all errors & return them once cleanup has completed. --type PartitionConsumer interface { -- -- // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you -- // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this -- // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call -- // this before calling Close on the underlying client. -- AsyncClose() -- -- // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain -- // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service -- // the Messages channel when this function is called, you will be competing with Close for messages; consider -- // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes -- // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. -- Close() error -- -- // Messages returns the read channel for the messages that are returned by -- // the broker. -- Messages() <-chan *ConsumerMessage -- -- // Errors returns a read channel of errors that occurred during consuming, if -- // enabled. By default, errors are logged and not returned over this channel. -- // If you want to implement any custom error handling, set your config's -- // Consumer.Return.Errors setting to true, and read from this channel. -- Errors() <-chan *ConsumerError -- -- // HighWaterMarkOffset returns the high water mark offset of the partition, -- // i.e. the offset that will be used for the next message that will be produced. -- // You can use this to determine how far behind the processing is. 
-- HighWaterMarkOffset() int64 --} -- --type partitionConsumer struct { -- highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG -- consumer *consumer -- conf *Config -- topic string -- partition int32 -- -- broker *brokerConsumer -- messages chan *ConsumerMessage -- errors chan *ConsumerError -- feeder chan *FetchResponse -- -- trigger, dying chan none -- responseResult error -- -- fetchSize int32 -- offset int64 --} -- --var errTimedOut = errors.New(""timed out feeding messages to the user"") // not user-facing -- --func (child *partitionConsumer) sendError(err error) { -- cErr := &ConsumerError{ -- Topic: child.topic, -- Partition: child.partition, -- Err: err, -- } -- -- if child.conf.Consumer.Return.Errors { -- child.errors <- cErr -- } else { -- Logger.Println(cErr) -- } --} -- --func (child *partitionConsumer) dispatcher() { -- for range child.trigger { -- select { -- case <-child.dying: -- close(child.trigger) -- case <-time.After(child.conf.Consumer.Retry.Backoff): -- if child.broker != nil { -- child.consumer.unrefBrokerConsumer(child.broker) -- child.broker = nil -- } -- -- Logger.Printf(""consumer/%s/%d finding new broker\n"", child.topic, child.partition) -- if err := child.dispatch(); err != nil { -- child.sendError(err) -- child.trigger <- none{} -- } -- } -- } -- -- if child.broker != nil { -- child.consumer.unrefBrokerConsumer(child.broker) -- } -- child.consumer.removeChild(child) -- close(child.feeder) --} -- --func (child *partitionConsumer) dispatch() error { -- if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { -- return err -- } -- -- var leader *Broker -- var err error -- if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { -- return err -- } -- -- child.broker = child.consumer.refBrokerConsumer(leader) -- -- child.broker.input <- child -- -- return nil --} -- --func (child *partitionConsumer) chooseStartingOffset(offset int64) error { -- newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) -- if err != nil { -- return err -- } -- oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) -- if err != nil { -- return err -- } -- -- switch { -- case offset == OffsetNewest: -- child.offset = newestOffset -- case offset == OffsetOldest: -- child.offset = oldestOffset -- case offset >= oldestOffset && offset <= newestOffset: -- child.offset = offset -- default: -- return ErrOffsetOutOfRange -- } -- -- return nil --} -- --func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { -- return child.messages --} -- --func (child *partitionConsumer) Errors() <-chan *ConsumerError { -- return child.errors --} -- --func (child *partitionConsumer) AsyncClose() { -- // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes -- // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and -- // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will -- // also just close itself) -- close(child.dying) --} -- --func (child *partitionConsumer) Close() error { -- child.AsyncClose() -- -- go withRecover(func() { -- for range child.messages { -- // drain -- } -- }) -- -- var errors ConsumerErrors -- for err := range child.errors { -- errors = append(errors, err) -- } -- -- if len(errors) > 0 { -- return errors -- } -- return nil --} -- 
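// Usage sketch for the Consumer / PartitionConsumer API documented above. This is
// illustrative only: the broker address, topic name, and partition number are
// placeholders, and error handling is reduced to log.Fatal for brevity.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Surface consume errors on the Errors() channel instead of only logging them.
	cfg.Consumer.Return.Errors = true

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close() // runs after the partition consumer is closed

	// Consume a single partition starting at the newest offset.
	pc, err := consumer.ConsumePartition("example-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for {
		select {
		case msg := <-pc.Messages():
			log.Printf("offset %d: %s", msg.Offset, msg.Value)
		case consumeErr := <-pc.Errors():
			log.Printf("consume error: %v", consumeErr)
		}
	}
}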
--func (child *partitionConsumer) HighWaterMarkOffset() int64 { -- return atomic.LoadInt64(&child.highWaterMarkOffset) --} -- --func (child *partitionConsumer) responseFeeder() { -- var msgs []*ConsumerMessage -- expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) -- firstAttempt := true -- --feederLoop: -- for response := range child.feeder { -- msgs, child.responseResult = child.parseResponse(response) -- -- for i, msg := range msgs { -- messageSelect: -- select { -- case child.messages <- msg: -- firstAttempt = true -- case <-expiryTicker.C: -- if !firstAttempt { -- child.responseResult = errTimedOut -- child.broker.acks.Done() -- for _, msg = range msgs[i:] { -- child.messages <- msg -- } -- child.broker.input <- child -- expiryTicker.Stop() -- continue feederLoop -- } else { -- // current message has not been sent, return to select -- // statement -- firstAttempt = false -- goto messageSelect -- } -- } -- } -- -- child.broker.acks.Done() -- } -- -- expiryTicker.Stop() -- close(child.messages) -- close(child.errors) --} -- --func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { -- var messages []*ConsumerMessage -- var incomplete bool -- prelude := true -- -- for _, msgBlock := range msgSet.Messages { -- for _, msg := range msgBlock.Messages() { -- offset := msg.Offset -- if msg.Msg.Version >= 1 { -- baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset -- offset += baseOffset -- } -- if prelude && offset < child.offset { -- continue -- } -- prelude = false -- -- if offset >= child.offset { -- messages = append(messages, &ConsumerMessage{ -- Topic: child.topic, -- Partition: child.partition, -- Key: msg.Msg.Key, -- Value: msg.Msg.Value, -- Offset: offset, -- Timestamp: msg.Msg.Timestamp, -- BlockTimestamp: msgBlock.Msg.Timestamp, -- }) -- child.offset = offset + 1 -- } else { -- incomplete = true -- } -- } -- } -- -- if incomplete || len(messages) == 0 { -- return nil, ErrIncompleteResponse -- } -- return messages, nil --} -- --func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { -- var messages []*ConsumerMessage -- var incomplete bool -- prelude := true -- originalOffset := child.offset -- -- for _, rec := range batch.Records { -- offset := batch.FirstOffset + rec.OffsetDelta -- if prelude && offset < child.offset { -- continue -- } -- prelude = false -- -- if offset >= child.offset { -- messages = append(messages, &ConsumerMessage{ -- Topic: child.topic, -- Partition: child.partition, -- Key: rec.Key, -- Value: rec.Value, -- Offset: offset, -- Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), -- Headers: rec.Headers, -- }) -- child.offset = offset + 1 -- } else { -- incomplete = true -- } -- } -- -- if incomplete { -- return nil, ErrIncompleteResponse -- } -- -- child.offset = batch.FirstOffset + int64(batch.LastOffsetDelta) + 1 -- if child.offset <= originalOffset { -- return nil, ErrConsumerOffsetNotAdvanced -- } -- -- return messages, nil --} -- --func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { -- block := response.GetBlock(child.topic, child.partition) -- if block == nil { -- return nil, ErrIncompleteResponse -- } -- -- if block.Err != ErrNoError { -- return nil, block.Err -- } -- -- nRecs, err := block.numRecords() -- if err != nil { -- return nil, err -- } -- if nRecs == 0 { -- partialTrailingMessage, err := block.isPartial() -- if err != nil { -- return nil, err -- } -- // We got no 
messages. If we got a trailing one then we need to ask for more data. -- // Otherwise we just poll again and wait for one to be produced... -- if partialTrailingMessage { -- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { -- // we can't ask for more data, we've hit the configured limit -- child.sendError(ErrMessageTooLarge) -- child.offset++ // skip this one so we can keep processing future messages -- } else { -- child.fetchSize *= 2 -- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { -- child.fetchSize = child.conf.Consumer.Fetch.Max -- } -- } -- } -- -- return nil, nil -- } -- -- // we got messages, reset our fetch size in case it was increased for a previous request -- child.fetchSize = child.conf.Consumer.Fetch.Default -- atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) -- -- messages := []*ConsumerMessage{} -- for _, records := range block.RecordsSet { -- if control, err := records.isControl(); err != nil || control { -- continue -- } -- -- switch records.recordsType { -- case legacyRecords: -- messageSetMessages, err := child.parseMessages(records.msgSet) -- if err != nil { -- return nil, err -- } -- -- messages = append(messages, messageSetMessages...) -- case defaultRecords: -- recordBatchMessages, err := child.parseRecords(records.recordBatch) -- if err != nil { -- return nil, err -- } -- -- messages = append(messages, recordBatchMessages...) -- default: -- return nil, fmt.Errorf(""unknown records type: %v"", records.recordsType) -- } -- } -- -- return messages, nil --} -- --// brokerConsumer -- --type brokerConsumer struct { -- consumer *consumer -- broker *Broker -- input chan *partitionConsumer -- newSubscriptions chan []*partitionConsumer -- wait chan none -- subscriptions map[*partitionConsumer]none -- acks sync.WaitGroup -- refs int --} -- --func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { -- bc := &brokerConsumer{ -- consumer: c, -- broker: broker, -- input: make(chan *partitionConsumer), -- newSubscriptions: make(chan []*partitionConsumer), -- wait: make(chan none), -- subscriptions: make(map[*partitionConsumer]none), -- refs: 0, -- } -- -- go withRecover(bc.subscriptionManager) -- go withRecover(bc.subscriptionConsumer) -- -- return bc --} -- --func (bc *brokerConsumer) subscriptionManager() { -- var buffer []*partitionConsumer -- -- // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer -- // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks -- // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give -- // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, -- // so the main goroutine can block waiting for work if it has none. 
-- for { -- if len(buffer) > 0 { -- select { -- case event, ok := <-bc.input: -- if !ok { -- goto done -- } -- buffer = append(buffer, event) -- case bc.newSubscriptions <- buffer: -- buffer = nil -- case bc.wait <- none{}: -- } -- } else { -- select { -- case event, ok := <-bc.input: -- if !ok { -- goto done -- } -- buffer = append(buffer, event) -- case bc.newSubscriptions <- nil: -- } -- } -- } -- --done: -- close(bc.wait) -- if len(buffer) > 0 { -- bc.newSubscriptions <- buffer -- } -- close(bc.newSubscriptions) --} -- --func (bc *brokerConsumer) subscriptionConsumer() { -- <-bc.wait // wait for our first piece of work -- -- // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available -- for newSubscriptions := range bc.newSubscriptions { -- bc.updateSubscriptions(newSubscriptions) -- -- if len(bc.subscriptions) == 0 { -- // We're about to be shut down or we're about to receive more subscriptions. -- // Either way, the signal just hasn't propagated to our goroutine yet. -- <-bc.wait -- continue -- } -- -- response, err := bc.fetchNewMessages() -- -- if err != nil { -- Logger.Printf(""consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n"", bc.broker.ID(), err) -- bc.abort(err) -- return -- } -- -- bc.acks.Add(len(bc.subscriptions)) -- for child := range bc.subscriptions { -- child.feeder <- response -- } -- bc.acks.Wait() -- bc.handleResponses() -- } --} -- --func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { -- for _, child := range newSubscriptions { -- bc.subscriptions[child] = none{} -- Logger.Printf(""consumer/broker/%d added subscription to %s/%d\n"", bc.broker.ID(), child.topic, child.partition) -- } -- -- for child := range bc.subscriptions { -- select { -- case <-child.dying: -- Logger.Printf(""consumer/broker/%d closed dead subscription to %s/%d\n"", bc.broker.ID(), child.topic, child.partition) -- close(child.trigger) -- delete(bc.subscriptions, child) -- default: -- break -- } -- } --} -- --func (bc *brokerConsumer) handleResponses() { -- // handles the response codes left for us by our subscriptions, and abandons ones that have been closed -- for child := range bc.subscriptions { -- result := child.responseResult -- child.responseResult = nil -- -- switch result { -- case nil: -- break -- case errTimedOut: -- Logger.Printf(""consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n"", -- bc.broker.ID(), child.topic, child.partition) -- delete(bc.subscriptions, child) -- case ErrOffsetOutOfRange: -- // there's no point in retrying this it will just fail the same way again -- // shut it down and force the user to choose what to do -- child.sendError(result) -- Logger.Printf(""consumer/%s/%d shutting down because %s\n"", child.topic, child.partition, result) -- close(child.trigger) -- delete(bc.subscriptions, child) -- case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: -- // not an error, but does need redispatching -- Logger.Printf(""consumer/broker/%d abandoned subscription to %s/%d because %s\n"", -- bc.broker.ID(), child.topic, child.partition, result) -- child.trigger <- none{} -- delete(bc.subscriptions, child) -- default: -- // dunno, tell the user and try redispatching -- child.sendError(result) -- Logger.Printf(""consumer/broker/%d abandoned subscription to %s/%d because %s\n"", -- bc.broker.ID(), child.topic, child.partition, result) -- child.trigger <- none{} -- 
delete(bc.subscriptions, child) -- } -- } --} -- --func (bc *brokerConsumer) abort(err error) { -- bc.consumer.abandonBrokerConsumer(bc) -- _ = bc.broker.Close() // we don't care about the error this might return, we already have one -- -- for child := range bc.subscriptions { -- child.sendError(err) -- child.trigger <- none{} -- } -- -- for newSubscriptions := range bc.newSubscriptions { -- if len(newSubscriptions) == 0 { -- <-bc.wait -- continue -- } -- for _, child := range newSubscriptions { -- child.sendError(err) -- child.trigger <- none{} -- } -- } --} -- --func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { -- request := &FetchRequest{ -- MinBytes: bc.consumer.conf.Consumer.Fetch.Min, -- MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), -- } -- if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { -- request.Version = 2 -- } -- if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { -- request.Version = 3 -- request.MaxBytes = MaxResponseSize -- } -- if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { -- request.Version = 4 -- request.Isolation = ReadUncommitted // We don't support yet transactions. -- } -- -- for child := range bc.subscriptions { -- request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) -- } -- -- return bc.broker.Fetch(request) --} -diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go -deleted file mode 100644 -index 9d92d350a5dcb..0000000000000 ---- a/vendor/github.com/Shopify/sarama/consumer_group_members.go -+++ /dev/null -@@ -1,94 +0,0 @@ --package sarama -- --type ConsumerGroupMemberMetadata struct { -- Version int16 -- Topics []string -- UserData []byte --} -- --func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { -- pe.putInt16(m.Version) -- -- if err := pe.putStringArray(m.Topics); err != nil { -- return err -- } -- -- if err := pe.putBytes(m.UserData); err != nil { -- return err -- } -- -- return nil --} -- --func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { -- if m.Version, err = pd.getInt16(); err != nil { -- return -- } -- -- if m.Topics, err = pd.getStringArray(); err != nil { -- return -- } -- -- if m.UserData, err = pd.getBytes(); err != nil { -- return -- } -- -- return nil --} -- --type ConsumerGroupMemberAssignment struct { -- Version int16 -- Topics map[string][]int32 -- UserData []byte --} -- --func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { -- pe.putInt16(m.Version) -- -- if err := pe.putArrayLength(len(m.Topics)); err != nil { -- return err -- } -- -- for topic, partitions := range m.Topics { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putInt32Array(partitions); err != nil { -- return err -- } -- } -- -- if err := pe.putBytes(m.UserData); err != nil { -- return err -- } -- -- return nil --} -- --func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { -- if m.Version, err = pd.getInt16(); err != nil { -- return -- } -- -- var topicLen int -- if topicLen, err = pd.getArrayLength(); err != nil { -- return -- } -- -- m.Topics = make(map[string][]int32, topicLen) -- for i := 0; i < topicLen; i++ { -- var topic string -- if topic, err = pd.getString(); err != nil { -- return -- } -- if m.Topics[topic], err = pd.getInt32Array(); err != nil { -- return -- } -- } -- -- if m.UserData, err = pd.getBytes(); err != nil { -- return -- } -- -- return nil --} -diff --git 
a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go -deleted file mode 100644 -index 483be3354df5b..0000000000000 ---- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go -+++ /dev/null -@@ -1,26 +0,0 @@ --package sarama -- --type ConsumerMetadataRequest struct { -- ConsumerGroup string --} -- --func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { -- return pe.putString(r.ConsumerGroup) --} -- --func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { -- r.ConsumerGroup, err = pd.getString() -- return err --} -- --func (r *ConsumerMetadataRequest) key() int16 { -- return 10 --} -- --func (r *ConsumerMetadataRequest) version() int16 { -- return 0 --} -- --func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { -- return V0_8_2_0 --} -diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go -deleted file mode 100644 -index 6b9632bbafe63..0000000000000 ---- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go -+++ /dev/null -@@ -1,85 +0,0 @@ --package sarama -- --import ( -- ""net"" -- ""strconv"" --) -- --type ConsumerMetadataResponse struct { -- Err KError -- Coordinator *Broker -- CoordinatorID int32 // deprecated: use Coordinator.ID() -- CoordinatorHost string // deprecated: use Coordinator.Addr() -- CoordinatorPort int32 // deprecated: use Coordinator.Addr() --} -- --func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- r.Err = KError(tmp) -- -- coordinator := new(Broker) -- if err := coordinator.decode(pd); err != nil { -- return err -- } -- if coordinator.addr == "":0"" { -- return nil -- } -- r.Coordinator = coordinator -- -- // this can all go away in 2.0, but we have to fill in deprecated fields to maintain -- // backwards compatibility -- host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) -- if err != nil { -- return err -- } -- port, err := strconv.ParseInt(portstr, 10, 32) -- if err != nil { -- return err -- } -- r.CoordinatorID = r.Coordinator.ID() -- r.CoordinatorHost = host -- r.CoordinatorPort = int32(port) -- -- return nil --} -- --func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- if r.Coordinator != nil { -- host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) -- if err != nil { -- return err -- } -- port, err := strconv.ParseInt(portstr, 10, 32) -- if err != nil { -- return err -- } -- pe.putInt32(r.Coordinator.ID()) -- if err := pe.putString(host); err != nil { -- return err -- } -- pe.putInt32(int32(port)) -- return nil -- } -- pe.putInt32(r.CoordinatorID) -- if err := pe.putString(r.CoordinatorHost); err != nil { -- return err -- } -- pe.putInt32(r.CoordinatorPort) -- return nil --} -- --func (r *ConsumerMetadataResponse) key() int16 { -- return 10 --} -- --func (r *ConsumerMetadataResponse) version() int16 { -- return 0 --} -- --func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { -- return V0_8_2_0 --} -diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go -deleted file mode 100644 -index 1f144431a8bf5..0000000000000 ---- a/vendor/github.com/Shopify/sarama/crc32_field.go -+++ /dev/null -@@ -1,69 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""hash/crc32"" --) 
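// Checksum sketch for the crc32Field helper below: legacy Kafka message sets are
// checksummed with the IEEE polynomial, while the newer record batches use
// Castagnoli (CRC-32C). The payload here is a placeholder.
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("example record payload")

	ieee := crc32.ChecksumIEEE(payload)
	castagnoli := crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))

	fmt.Printf("IEEE: %#x  Castagnoli: %#x\n", ieee, castagnoli)
}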
-- --type crcPolynomial int8 -- --const ( -- crcIEEE crcPolynomial = iota -- crcCastagnoli --) -- --var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) -- --// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. --type crc32Field struct { -- startOffset int -- polynomial crcPolynomial --} -- --func (c *crc32Field) saveOffset(in int) { -- c.startOffset = in --} -- --func (c *crc32Field) reserveLength() int { -- return 4 --} -- --func newCRC32Field(polynomial crcPolynomial) *crc32Field { -- return &crc32Field{polynomial: polynomial} --} -- --func (c *crc32Field) run(curOffset int, buf []byte) error { -- crc, err := c.crc(curOffset, buf) -- if err != nil { -- return err -- } -- binary.BigEndian.PutUint32(buf[c.startOffset:], crc) -- return nil --} -- --func (c *crc32Field) check(curOffset int, buf []byte) error { -- crc, err := c.crc(curOffset, buf) -- if err != nil { -- return err -- } -- -- expected := binary.BigEndian.Uint32(buf[c.startOffset:]) -- if crc != expected { -- return PacketDecodingError{fmt.Sprintf(""CRC didn't match expected %#x got %#x"", expected, crc)} -- } -- -- return nil --} --func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { -- var tab *crc32.Table -- switch c.polynomial { -- case crcIEEE: -- tab = crc32.IEEETable -- case crcCastagnoli: -- tab = castagnoliTable -- default: -- return 0, PacketDecodingError{""invalid CRC type""} -- } -- return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil --} -diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go -deleted file mode 100644 -index af321e99466e5..0000000000000 ---- a/vendor/github.com/Shopify/sarama/create_partitions_request.go -+++ /dev/null -@@ -1,121 +0,0 @@ --package sarama -- --import ""time"" -- --type CreatePartitionsRequest struct { -- TopicPartitions map[string]*TopicPartition -- Timeout time.Duration -- ValidateOnly bool --} -- --func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { -- return err -- } -- -- for topic, partition := range c.TopicPartitions { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := partition.encode(pe); err != nil { -- return err -- } -- } -- -- pe.putInt32(int32(c.Timeout / time.Millisecond)) -- -- pe.putBool(c.ValidateOnly) -- -- return nil --} -- --func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- c.TopicPartitions = make(map[string]*TopicPartition, n) -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- c.TopicPartitions[topic] = new(TopicPartition) -- if err := c.TopicPartitions[topic].decode(pd, version); err != nil { -- return err -- } -- } -- -- timeout, err := pd.getInt32() -- if err != nil { -- return err -- } -- c.Timeout = time.Duration(timeout) * time.Millisecond -- -- if c.ValidateOnly, err = pd.getBool(); err != nil { -- return err -- } -- -- return nil --} -- --func (r *CreatePartitionsRequest) key() int16 { -- return 37 --} -- --func (r *CreatePartitionsRequest) version() int16 { -- return 0 --} -- --func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { -- return V1_0_0_0 --} -- --type TopicPartition struct { -- Count int32 -- Assignment [][]int32 --} -- --func (t *TopicPartition) encode(pe packetEncoder) error { -- pe.putInt32(t.Count) -- -- 
if len(t.Assignment) == 0 { -- pe.putInt32(-1) -- return nil -- } -- -- if err := pe.putArrayLength(len(t.Assignment)); err != nil { -- return err -- } -- -- for _, assign := range t.Assignment { -- if err := pe.putInt32Array(assign); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { -- if t.Count, err = pd.getInt32(); err != nil { -- return err -- } -- -- n, err := pd.getInt32() -- if err != nil { -- return err -- } -- if n <= 0 { -- return nil -- } -- t.Assignment = make([][]int32, n) -- -- for i := 0; i < int(n); i++ { -- if t.Assignment[i], err = pd.getInt32Array(); err != nil { -- return err -- } -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go -deleted file mode 100644 -index abd621c64ec42..0000000000000 ---- a/vendor/github.com/Shopify/sarama/create_partitions_response.go -+++ /dev/null -@@ -1,94 +0,0 @@ --package sarama -- --import ""time"" -- --type CreatePartitionsResponse struct { -- ThrottleTime time.Duration -- TopicPartitionErrors map[string]*TopicPartitionError --} -- --func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) -- if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { -- return err -- } -- -- for topic, partitionError := range c.TopicPartitionErrors { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := partitionError.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- c.TopicPartitionErrors[topic] = new(TopicPartitionError) -- if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *CreatePartitionsResponse) key() int16 { -- return 37 --} -- --func (r *CreatePartitionsResponse) version() int16 { -- return 0 --} -- --func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { -- return V1_0_0_0 --} -- --type TopicPartitionError struct { -- Err KError -- ErrMsg *string --} -- --func (t *TopicPartitionError) encode(pe packetEncoder) error { -- pe.putInt16(int16(t.Err)) -- -- if err := pe.putNullableString(t.ErrMsg); err != nil { -- return err -- } -- -- return nil --} -- --func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- t.Err = KError(kerr) -- -- if t.ErrMsg, err = pd.getNullableString(); err != nil { -- return err -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go -deleted file mode 100644 -index 709c0a44e7133..0000000000000 ---- a/vendor/github.com/Shopify/sarama/create_topics_request.go -+++ /dev/null -@@ -1,174 +0,0 @@ --package sarama -- --import ( -- ""time"" --) -- --type CreateTopicsRequest struct { -- Version int16 -- -- TopicDetails 
map[string]*TopicDetail -- Timeout time.Duration -- ValidateOnly bool --} -- --func (c *CreateTopicsRequest) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { -- return err -- } -- for topic, detail := range c.TopicDetails { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := detail.encode(pe); err != nil { -- return err -- } -- } -- -- pe.putInt32(int32(c.Timeout / time.Millisecond)) -- -- if c.Version >= 1 { -- pe.putBool(c.ValidateOnly) -- } -- -- return nil --} -- --func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- c.TopicDetails = make(map[string]*TopicDetail, n) -- -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- c.TopicDetails[topic] = new(TopicDetail) -- if err = c.TopicDetails[topic].decode(pd, version); err != nil { -- return err -- } -- } -- -- timeout, err := pd.getInt32() -- if err != nil { -- return err -- } -- c.Timeout = time.Duration(timeout) * time.Millisecond -- -- if version >= 1 { -- c.ValidateOnly, err = pd.getBool() -- if err != nil { -- return err -- } -- -- c.Version = version -- } -- -- return nil --} -- --func (c *CreateTopicsRequest) key() int16 { -- return 19 --} -- --func (c *CreateTopicsRequest) version() int16 { -- return c.Version --} -- --func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { -- switch c.Version { -- case 2: -- return V1_0_0_0 -- case 1: -- return V0_11_0_0 -- default: -- return V0_10_1_0 -- } --} -- --type TopicDetail struct { -- NumPartitions int32 -- ReplicationFactor int16 -- ReplicaAssignment map[int32][]int32 -- ConfigEntries map[string]*string --} -- --func (t *TopicDetail) encode(pe packetEncoder) error { -- pe.putInt32(t.NumPartitions) -- pe.putInt16(t.ReplicationFactor) -- -- if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { -- return err -- } -- for partition, assignment := range t.ReplicaAssignment { -- pe.putInt32(partition) -- if err := pe.putInt32Array(assignment); err != nil { -- return err -- } -- } -- -- if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { -- return err -- } -- for configKey, configValue := range t.ConfigEntries { -- if err := pe.putString(configKey); err != nil { -- return err -- } -- if err := pe.putNullableString(configValue); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { -- if t.NumPartitions, err = pd.getInt32(); err != nil { -- return err -- } -- if t.ReplicationFactor, err = pd.getInt16(); err != nil { -- return err -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- if n > 0 { -- t.ReplicaAssignment = make(map[int32][]int32, n) -- for i := 0; i < n; i++ { -- replica, err := pd.getInt32() -- if err != nil { -- return err -- } -- if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { -- return err -- } -- } -- } -- -- n, err = pd.getArrayLength() -- if err != nil { -- return err -- } -- -- if n > 0 { -- t.ConfigEntries = make(map[string]*string, n) -- for i := 0; i < n; i++ { -- configKey, err := pd.getString() -- if err != nil { -- return err -- } -- if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { -- return err -- } -- } -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go 
b/vendor/github.com/Shopify/sarama/create_topics_response.go -deleted file mode 100644 -index 66207e00c5d4f..0000000000000 ---- a/vendor/github.com/Shopify/sarama/create_topics_response.go -+++ /dev/null -@@ -1,112 +0,0 @@ --package sarama -- --import ""time"" -- --type CreateTopicsResponse struct { -- Version int16 -- ThrottleTime time.Duration -- TopicErrors map[string]*TopicError --} -- --func (c *CreateTopicsResponse) encode(pe packetEncoder) error { -- if c.Version >= 2 { -- pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) -- } -- -- if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { -- return err -- } -- for topic, topicError := range c.TopicErrors { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := topicError.encode(pe, c.Version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { -- c.Version = version -- -- if version >= 2 { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- c.TopicErrors = make(map[string]*TopicError, n) -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- c.TopicErrors[topic] = new(TopicError) -- if err := c.TopicErrors[topic].decode(pd, version); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (c *CreateTopicsResponse) key() int16 { -- return 19 --} -- --func (c *CreateTopicsResponse) version() int16 { -- return c.Version --} -- --func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { -- switch c.Version { -- case 2: -- return V1_0_0_0 -- case 1: -- return V0_11_0_0 -- default: -- return V0_10_1_0 -- } --} -- --type TopicError struct { -- Err KError -- ErrMsg *string --} -- --func (t *TopicError) encode(pe packetEncoder, version int16) error { -- pe.putInt16(int16(t.Err)) -- -- if version >= 1 { -- if err := pe.putNullableString(t.ErrMsg); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { -- kErr, err := pd.getInt16() -- if err != nil { -- return err -- } -- t.Err = KError(kErr) -- -- if version >= 1 { -- if t.ErrMsg, err = pd.getNullableString(); err != nil { -- return err -- } -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go -deleted file mode 100644 -index ed9089ea4789c..0000000000000 ---- a/vendor/github.com/Shopify/sarama/delete_topics_request.go -+++ /dev/null -@@ -1,41 +0,0 @@ --package sarama -- --import ""time"" -- --type DeleteTopicsRequest struct { -- Topics []string -- Timeout time.Duration --} -- --func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { -- if err := pe.putStringArray(d.Topics); err != nil { -- return err -- } -- pe.putInt32(int32(d.Timeout / time.Millisecond)) -- -- return nil --} -- --func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { -- if d.Topics, err = pd.getStringArray(); err != nil { -- return err -- } -- timeout, err := pd.getInt32() -- if err != nil { -- return err -- } -- d.Timeout = time.Duration(timeout) * time.Millisecond -- return nil --} -- --func (d *DeleteTopicsRequest) key() int16 { -- return 20 --} -- --func (d *DeleteTopicsRequest) version() int16 { -- return 0 --} -- --func (d 
*DeleteTopicsRequest) requiredVersion() KafkaVersion { -- return V0_10_1_0 --} -diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go -deleted file mode 100644 -index 34225460a31fd..0000000000000 ---- a/vendor/github.com/Shopify/sarama/delete_topics_response.go -+++ /dev/null -@@ -1,78 +0,0 @@ --package sarama -- --import ""time"" -- --type DeleteTopicsResponse struct { -- Version int16 -- ThrottleTime time.Duration -- TopicErrorCodes map[string]KError --} -- --func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { -- if d.Version >= 1 { -- pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) -- } -- -- if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { -- return err -- } -- for topic, errorCode := range d.TopicErrorCodes { -- if err := pe.putString(topic); err != nil { -- return err -- } -- pe.putInt16(int16(errorCode)) -- } -- -- return nil --} -- --func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { -- if version >= 1 { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- d.Version = version -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- d.TopicErrorCodes = make(map[string]KError, n) -- -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- errorCode, err := pd.getInt16() -- if err != nil { -- return err -- } -- -- d.TopicErrorCodes[topic] = KError(errorCode) -- } -- -- return nil --} -- --func (d *DeleteTopicsResponse) key() int16 { -- return 20 --} -- --func (d *DeleteTopicsResponse) version() int16 { -- return d.Version --} -- --func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { -- switch d.Version { -- case 1: -- return V0_11_0_0 -- default: -- return V0_10_1_0 -- } --} -diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go -deleted file mode 100644 -index 7a7cffc3fb278..0000000000000 ---- a/vendor/github.com/Shopify/sarama/describe_configs_request.go -+++ /dev/null -@@ -1,91 +0,0 @@ --package sarama -- --type ConfigResource struct { -- Type ConfigResourceType -- Name string -- ConfigNames []string --} -- --type DescribeConfigsRequest struct { -- Resources []*ConfigResource --} -- --func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(r.Resources)); err != nil { -- return err -- } -- -- for _, c := range r.Resources { -- pe.putInt8(int8(c.Type)) -- if err := pe.putString(c.Name); err != nil { -- return err -- } -- -- if len(c.ConfigNames) == 0 { -- pe.putInt32(-1) -- continue -- } -- if err := pe.putStringArray(c.ConfigNames); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Resources = make([]*ConfigResource, n) -- -- for i := 0; i < n; i++ { -- r.Resources[i] = &ConfigResource{} -- t, err := pd.getInt8() -- if err != nil { -- return err -- } -- r.Resources[i].Type = ConfigResourceType(t) -- name, err := pd.getString() -- if err != nil { -- return err -- } -- r.Resources[i].Name = name -- -- confLength, err := pd.getArrayLength() -- -- if err != nil { -- return err -- } -- -- if confLength == -1 { -- continue -- } -- -- cfnames := make([]string, 
confLength) -- for i := 0; i < confLength; i++ { -- s, err := pd.getString() -- if err != nil { -- return err -- } -- cfnames[i] = s -- } -- r.Resources[i].ConfigNames = cfnames -- } -- -- return nil --} -- --func (r *DescribeConfigsRequest) key() int16 { -- return 32 --} -- --func (r *DescribeConfigsRequest) version() int16 { -- return 0 --} -- --func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go -deleted file mode 100644 -index 6e5d30e4f0918..0000000000000 ---- a/vendor/github.com/Shopify/sarama/describe_configs_response.go -+++ /dev/null -@@ -1,188 +0,0 @@ --package sarama -- --import ""time"" -- --type DescribeConfigsResponse struct { -- ThrottleTime time.Duration -- Resources []*ResourceResponse --} -- --type ResourceResponse struct { -- ErrorCode int16 -- ErrorMsg string -- Type ConfigResourceType -- Name string -- Configs []*ConfigEntry --} -- --type ConfigEntry struct { -- Name string -- Value string -- ReadOnly bool -- Default bool -- Sensitive bool --} -- --func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { -- pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) -- if err = pe.putArrayLength(len(r.Resources)); err != nil { -- return err -- } -- -- for _, c := range r.Resources { -- if err = c.encode(pe); err != nil { -- return err -- } -- } -- return nil --} -- --func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Resources = make([]*ResourceResponse, n) -- for i := 0; i < n; i++ { -- rr := &ResourceResponse{} -- if err := rr.decode(pd, version); err != nil { -- return err -- } -- r.Resources[i] = rr -- } -- -- return nil --} -- --func (r *DescribeConfigsResponse) key() int16 { -- return 32 --} -- --func (r *DescribeConfigsResponse) version() int16 { -- return 0 --} -- --func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -- --func (r *ResourceResponse) encode(pe packetEncoder) (err error) { -- pe.putInt16(r.ErrorCode) -- -- if err = pe.putString(r.ErrorMsg); err != nil { -- return err -- } -- -- pe.putInt8(int8(r.Type)) -- -- if err = pe.putString(r.Name); err != nil { -- return err -- } -- -- if err = pe.putArrayLength(len(r.Configs)); err != nil { -- return err -- } -- -- for _, c := range r.Configs { -- if err = c.encode(pe); err != nil { -- return err -- } -- } -- return nil --} -- --func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { -- ec, err := pd.getInt16() -- if err != nil { -- return err -- } -- r.ErrorCode = ec -- -- em, err := pd.getString() -- if err != nil { -- return err -- } -- r.ErrorMsg = em -- -- t, err := pd.getInt8() -- if err != nil { -- return err -- } -- r.Type = ConfigResourceType(t) -- -- name, err := pd.getString() -- if err != nil { -- return err -- } -- r.Name = name -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Configs = make([]*ConfigEntry, n) -- for i := 0; i < n; i++ { -- c := &ConfigEntry{} -- if err := c.decode(pd, version); err != nil { -- return err -- } -- r.Configs[i] = c -- } -- return nil --} -- --func (r *ConfigEntry) encode(pe packetEncoder) (err error) { -- if err = 
pe.putString(r.Name); err != nil { -- return err -- } -- -- if err = pe.putString(r.Value); err != nil { -- return err -- } -- -- pe.putBool(r.ReadOnly) -- pe.putBool(r.Default) -- pe.putBool(r.Sensitive) -- return nil --} -- --func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { -- name, err := pd.getString() -- if err != nil { -- return err -- } -- r.Name = name -- -- value, err := pd.getString() -- if err != nil { -- return err -- } -- r.Value = value -- -- read, err := pd.getBool() -- if err != nil { -- return err -- } -- r.ReadOnly = read -- -- de, err := pd.getBool() -- if err != nil { -- return err -- } -- r.Default = de -- -- sensitive, err := pd.getBool() -- if err != nil { -- return err -- } -- r.Sensitive = sensitive -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go -deleted file mode 100644 -index 1fb3567770857..0000000000000 ---- a/vendor/github.com/Shopify/sarama/describe_groups_request.go -+++ /dev/null -@@ -1,30 +0,0 @@ --package sarama -- --type DescribeGroupsRequest struct { -- Groups []string --} -- --func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { -- return pe.putStringArray(r.Groups) --} -- --func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { -- r.Groups, err = pd.getStringArray() -- return --} -- --func (r *DescribeGroupsRequest) key() int16 { -- return 15 --} -- --func (r *DescribeGroupsRequest) version() int16 { -- return 0 --} -- --func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -- --func (r *DescribeGroupsRequest) AddGroup(group string) { -- r.Groups = append(r.Groups, group) --} -diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go -deleted file mode 100644 -index 542b3a971709c..0000000000000 ---- a/vendor/github.com/Shopify/sarama/describe_groups_response.go -+++ /dev/null -@@ -1,187 +0,0 @@ --package sarama -- --type DescribeGroupsResponse struct { -- Groups []*GroupDescription --} -- --func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(r.Groups)); err != nil { -- return err -- } -- -- for _, groupDescription := range r.Groups { -- if err := groupDescription.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Groups = make([]*GroupDescription, n) -- for i := 0; i < n; i++ { -- r.Groups[i] = new(GroupDescription) -- if err := r.Groups[i].decode(pd); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *DescribeGroupsResponse) key() int16 { -- return 15 --} -- --func (r *DescribeGroupsResponse) version() int16 { -- return 0 --} -- --func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -- --type GroupDescription struct { -- Err KError -- GroupId string -- State string -- ProtocolType string -- Protocol string -- Members map[string]*GroupMemberDescription --} -- --func (gd *GroupDescription) encode(pe packetEncoder) error { -- pe.putInt16(int16(gd.Err)) -- -- if err := pe.putString(gd.GroupId); err != nil { -- return err -- } -- if err := pe.putString(gd.State); err != nil { -- return err -- } -- if err := pe.putString(gd.ProtocolType); err != nil { -- return err -- } -- if err := 
pe.putString(gd.Protocol); err != nil { -- return err -- } -- -- if err := pe.putArrayLength(len(gd.Members)); err != nil { -- return err -- } -- -- for memberId, groupMemberDescription := range gd.Members { -- if err := pe.putString(memberId); err != nil { -- return err -- } -- if err := groupMemberDescription.encode(pe); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (gd *GroupDescription) decode(pd packetDecoder) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- -- gd.Err = KError(kerr) -- -- if gd.GroupId, err = pd.getString(); err != nil { -- return -- } -- if gd.State, err = pd.getString(); err != nil { -- return -- } -- if gd.ProtocolType, err = pd.getString(); err != nil { -- return -- } -- if gd.Protocol, err = pd.getString(); err != nil { -- return -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if n == 0 { -- return nil -- } -- -- gd.Members = make(map[string]*GroupMemberDescription) -- for i := 0; i < n; i++ { -- memberId, err := pd.getString() -- if err != nil { -- return err -- } -- -- gd.Members[memberId] = new(GroupMemberDescription) -- if err := gd.Members[memberId].decode(pd); err != nil { -- return err -- } -- } -- -- return nil --} -- --type GroupMemberDescription struct { -- ClientId string -- ClientHost string -- MemberMetadata []byte -- MemberAssignment []byte --} -- --func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { -- if err := pe.putString(gmd.ClientId); err != nil { -- return err -- } -- if err := pe.putString(gmd.ClientHost); err != nil { -- return err -- } -- if err := pe.putBytes(gmd.MemberMetadata); err != nil { -- return err -- } -- if err := pe.putBytes(gmd.MemberAssignment); err != nil { -- return err -- } -- -- return nil --} -- --func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { -- if gmd.ClientId, err = pd.getString(); err != nil { -- return -- } -- if gmd.ClientHost, err = pd.getString(); err != nil { -- return -- } -- if gmd.MemberMetadata, err = pd.getBytes(); err != nil { -- return -- } -- if gmd.MemberAssignment, err = pd.getBytes(); err != nil { -- return -- } -- -- return nil --} -- --func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { -- assignment := new(ConsumerGroupMemberAssignment) -- err := decode(gmd.MemberAssignment, assignment) -- return assignment, err --} -- --func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { -- metadata := new(ConsumerGroupMemberMetadata) -- err := decode(gmd.MemberMetadata, metadata) -- return metadata, err --} -diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml -deleted file mode 100644 -index 294fcdb413b85..0000000000000 ---- a/vendor/github.com/Shopify/sarama/dev.yml -+++ /dev/null -@@ -1,10 +0,0 @@ --name: sarama -- --up: -- - go: -- version: '1.9' -- --commands: -- test: -- run: make test -- desc: 'run unit tests' -diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go -deleted file mode 100644 -index 7ce3bc0f6e27a..0000000000000 ---- a/vendor/github.com/Shopify/sarama/encoder_decoder.go -+++ /dev/null -@@ -1,89 +0,0 @@ --package sarama -- --import ( -- ""fmt"" -- -- ""github.com/rcrowley/go-metrics"" --) -- --// Encoder is the interface that wraps the basic Encode method. --// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. 
--type encoder interface { -- encode(pe packetEncoder) error --} -- --// Encode takes an Encoder and turns it into bytes while potentially recording metrics. --func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { -- if e == nil { -- return nil, nil -- } -- -- var prepEnc prepEncoder -- var realEnc realEncoder -- -- err := e.encode(&prepEnc) -- if err != nil { -- return nil, err -- } -- -- if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { -- return nil, PacketEncodingError{fmt.Sprintf(""invalid request size (%d)"", prepEnc.length)} -- } -- -- realEnc.raw = make([]byte, prepEnc.length) -- realEnc.registry = metricRegistry -- err = e.encode(&realEnc) -- if err != nil { -- return nil, err -- } -- -- return realEnc.raw, nil --} -- --// Decoder is the interface that wraps the basic Decode method. --// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. --type decoder interface { -- decode(pd packetDecoder) error --} -- --type versionedDecoder interface { -- decode(pd packetDecoder, version int16) error --} -- --// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, --// interpreted using Kafka's encoding rules. --func decode(buf []byte, in decoder) error { -- if buf == nil { -- return nil -- } -- -- helper := realDecoder{raw: buf} -- err := in.decode(&helper) -- if err != nil { -- return err -- } -- -- if helper.off != len(buf) { -- return PacketDecodingError{""invalid length""} -- } -- -- return nil --} -- --func versionedDecode(buf []byte, in versionedDecoder, version int16) error { -- if buf == nil { -- return nil -- } -- -- helper := realDecoder{raw: buf} -- err := in.decode(&helper, version) -- if err != nil { -- return err -- } -- -- if helper.off != len(buf) { -- return PacketDecodingError{""invalid length""} -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go -deleted file mode 100644 -index 2cd9b506d3fc6..0000000000000 ---- a/vendor/github.com/Shopify/sarama/end_txn_request.go -+++ /dev/null -@@ -1,50 +0,0 @@ --package sarama -- --type EndTxnRequest struct { -- TransactionalID string -- ProducerID int64 -- ProducerEpoch int16 -- TransactionResult bool --} -- --func (a *EndTxnRequest) encode(pe packetEncoder) error { -- if err := pe.putString(a.TransactionalID); err != nil { -- return err -- } -- -- pe.putInt64(a.ProducerID) -- -- pe.putInt16(a.ProducerEpoch) -- -- pe.putBool(a.TransactionResult) -- -- return nil --} -- --func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { -- if a.TransactionalID, err = pd.getString(); err != nil { -- return err -- } -- if a.ProducerID, err = pd.getInt64(); err != nil { -- return err -- } -- if a.ProducerEpoch, err = pd.getInt16(); err != nil { -- return err -- } -- if a.TransactionResult, err = pd.getBool(); err != nil { -- return err -- } -- return nil --} -- --func (a *EndTxnRequest) key() int16 { -- return 26 --} -- --func (a *EndTxnRequest) version() int16 { -- return 0 --} -- --func (a *EndTxnRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go -deleted file mode 100644 -index 33b27e33d493f..0000000000000 ---- a/vendor/github.com/Shopify/sarama/end_txn_response.go -+++ /dev/null -@@ -1,44 +0,0 @@ --package sarama -- --import ( -- ""time"" --) -- --type EndTxnResponse struct { -- ThrottleTime 
time.Duration -- Err KError --} -- --func (e *EndTxnResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) -- pe.putInt16(int16(e.Err)) -- return nil --} -- --func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- e.Err = KError(kerr) -- -- return nil --} -- --func (e *EndTxnResponse) key() int16 { -- return 25 --} -- --func (e *EndTxnResponse) version() int16 { -- return 0 --} -- --func (e *EndTxnResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go -deleted file mode 100644 -index 54f431a4a91e9..0000000000000 ---- a/vendor/github.com/Shopify/sarama/errors.go -+++ /dev/null -@@ -1,273 +0,0 @@ --package sarama -- --import ( -- ""errors"" -- ""fmt"" --) -- --// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored --// or otherwise failed to respond. --var ErrOutOfBrokers = errors.New(""kafka: client has run out of available brokers to talk to (Is your cluster reachable?)"") -- --// ErrClosedClient is the error returned when a method is called on a client that has been closed. --var ErrClosedClient = errors.New(""kafka: tried to use a client that was closed"") -- --// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does --// not contain the expected information. --var ErrIncompleteResponse = errors.New(""kafka: response did not contain all the expected topic/partition blocks"") -- --// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index --// (meaning one outside of the range [0...numPartitions-1]). --var ErrInvalidPartition = errors.New(""kafka: partitioner returned an invalid partition index"") -- --// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. --var ErrAlreadyConnected = errors.New(""kafka: broker connection already initiated"") -- --// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. --var ErrNotConnected = errors.New(""kafka: broker not connected"") -- --// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected --// when requesting messages, since as an optimization the server is allowed to return a partial message at the end --// of the message set. --var ErrInsufficientData = errors.New(""kafka: insufficient data to decode packet, more bytes expected"") -- --// ErrShuttingDown is returned when a producer receives a message during shutdown. --var ErrShuttingDown = errors.New(""kafka: message received by producer in process of shutting down"") -- --// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max --var ErrMessageTooLarge = errors.New(""kafka: message is larger than Consumer.Fetch.Max"") -- --// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing --// a RecordBatch. 
--var ErrConsumerOffsetNotAdvanced = errors.New(""kafka: consumer offset was not advanced after a RecordBatch"") -- --// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, --// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. --type PacketEncodingError struct { -- Info string --} -- --func (err PacketEncodingError) Error() string { -- return fmt.Sprintf(""kafka: error encoding packet: %s"", err.Info) --} -- --// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. --// This can be a bad CRC or length field, or any other invalid value. --type PacketDecodingError struct { -- Info string --} -- --func (err PacketDecodingError) Error() string { -- return fmt.Sprintf(""kafka: error decoding packet: %s"", err.Info) --} -- --// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) --// when the specified configuration is invalid. --type ConfigurationError string -- --func (err ConfigurationError) Error() string { -- return ""kafka: invalid configuration ("" + string(err) + "")"" --} -- --// KError is the type of error that can be returned directly by the Kafka broker. --// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes --type KError int16 -- --// Numeric error codes returned by the Kafka server. --const ( -- ErrNoError KError = 0 -- ErrUnknown KError = -1 -- ErrOffsetOutOfRange KError = 1 -- ErrInvalidMessage KError = 2 -- ErrUnknownTopicOrPartition KError = 3 -- ErrInvalidMessageSize KError = 4 -- ErrLeaderNotAvailable KError = 5 -- ErrNotLeaderForPartition KError = 6 -- ErrRequestTimedOut KError = 7 -- ErrBrokerNotAvailable KError = 8 -- ErrReplicaNotAvailable KError = 9 -- ErrMessageSizeTooLarge KError = 10 -- ErrStaleControllerEpochCode KError = 11 -- ErrOffsetMetadataTooLarge KError = 12 -- ErrNetworkException KError = 13 -- ErrOffsetsLoadInProgress KError = 14 -- ErrConsumerCoordinatorNotAvailable KError = 15 -- ErrNotCoordinatorForConsumer KError = 16 -- ErrInvalidTopic KError = 17 -- ErrMessageSetSizeTooLarge KError = 18 -- ErrNotEnoughReplicas KError = 19 -- ErrNotEnoughReplicasAfterAppend KError = 20 -- ErrInvalidRequiredAcks KError = 21 -- ErrIllegalGeneration KError = 22 -- ErrInconsistentGroupProtocol KError = 23 -- ErrInvalidGroupId KError = 24 -- ErrUnknownMemberId KError = 25 -- ErrInvalidSessionTimeout KError = 26 -- ErrRebalanceInProgress KError = 27 -- ErrInvalidCommitOffsetSize KError = 28 -- ErrTopicAuthorizationFailed KError = 29 -- ErrGroupAuthorizationFailed KError = 30 -- ErrClusterAuthorizationFailed KError = 31 -- ErrInvalidTimestamp KError = 32 -- ErrUnsupportedSASLMechanism KError = 33 -- ErrIllegalSASLState KError = 34 -- ErrUnsupportedVersion KError = 35 -- ErrTopicAlreadyExists KError = 36 -- ErrInvalidPartitions KError = 37 -- ErrInvalidReplicationFactor KError = 38 -- ErrInvalidReplicaAssignment KError = 39 -- ErrInvalidConfig KError = 40 -- ErrNotController KError = 41 -- ErrInvalidRequest KError = 42 -- ErrUnsupportedForMessageFormat KError = 43 -- ErrPolicyViolation KError = 44 -- ErrOutOfOrderSequenceNumber KError = 45 -- ErrDuplicateSequenceNumber KError = 46 -- ErrInvalidProducerEpoch KError = 47 -- ErrInvalidTxnState KError = 48 -- ErrInvalidProducerIDMapping KError = 49 -- ErrInvalidTransactionTimeout KError = 50 -- ErrConcurrentTransactions KError = 51 
-- ErrTransactionCoordinatorFenced KError = 52 -- ErrTransactionalIDAuthorizationFailed KError = 53 -- ErrSecurityDisabled KError = 54 -- ErrOperationNotAttempted KError = 55 -- ErrKafkaStorageError KError = 56 -- ErrLogDirNotFound KError = 57 -- ErrSASLAuthenticationFailed KError = 58 -- ErrUnknownProducerID KError = 59 -- ErrReassignmentInProgress KError = 60 --) -- --func (err KError) Error() string { -- // Error messages stolen/adapted from -- // https://kafka.apache.org/protocol#protocol_error_codes -- switch err { -- case ErrNoError: -- return ""kafka server: Not an error, why are you printing me?"" -- case ErrUnknown: -- return ""kafka server: Unexpected (unknown?) server error."" -- case ErrOffsetOutOfRange: -- return ""kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."" -- case ErrInvalidMessage: -- return ""kafka server: Message contents does not match its CRC."" -- case ErrUnknownTopicOrPartition: -- return ""kafka server: Request was for a topic or partition that does not exist on this broker."" -- case ErrInvalidMessageSize: -- return ""kafka server: The message has a negative size."" -- case ErrLeaderNotAvailable: -- return ""kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."" -- case ErrNotLeaderForPartition: -- return ""kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."" -- case ErrRequestTimedOut: -- return ""kafka server: Request exceeded the user-specified time limit in the request."" -- case ErrBrokerNotAvailable: -- return ""kafka server: Broker not available. Not a client facing error, we should never receive this!!!"" -- case ErrReplicaNotAvailable: -- return ""kafka server: Replica information not available, one or more brokers are down."" -- case ErrMessageSizeTooLarge: -- return ""kafka server: Message was too large, server rejected it to avoid allocation error."" -- case ErrStaleControllerEpochCode: -- return ""kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."" -- case ErrOffsetMetadataTooLarge: -- return ""kafka server: Specified a string larger than the configured maximum for offset metadata."" -- case ErrNetworkException: -- return ""kafka server: The server disconnected before a response was received."" -- case ErrOffsetsLoadInProgress: -- return ""kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."" -- case ErrConsumerCoordinatorNotAvailable: -- return ""kafka server: Offset's topic has not yet been created."" -- case ErrNotCoordinatorForConsumer: -- return ""kafka server: Request was for a consumer group that is not coordinated by this broker."" -- case ErrInvalidTopic: -- return ""kafka server: The request attempted to perform an operation on an invalid topic."" -- case ErrMessageSetSizeTooLarge: -- return ""kafka server: The request included message batch larger than the configured segment size on the server."" -- case ErrNotEnoughReplicas: -- return ""kafka server: Messages are rejected since there are fewer in-sync replicas than required."" -- case ErrNotEnoughReplicasAfterAppend: -- return ""kafka server: Messages are written to the log, but to fewer in-sync replicas than required."" -- case ErrInvalidRequiredAcks: -- return ""kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."" -- 
case ErrIllegalGeneration: -- return ""kafka server: The provided generation id is not the current generation."" -- case ErrInconsistentGroupProtocol: -- return ""kafka server: The provider group protocol type is incompatible with the other members."" -- case ErrInvalidGroupId: -- return ""kafka server: The provided group id was empty."" -- case ErrUnknownMemberId: -- return ""kafka server: The provided member is not known in the current generation."" -- case ErrInvalidSessionTimeout: -- return ""kafka server: The provided session timeout is outside the allowed range."" -- case ErrRebalanceInProgress: -- return ""kafka server: A rebalance for the group is in progress. Please re-join the group."" -- case ErrInvalidCommitOffsetSize: -- return ""kafka server: The provided commit metadata was too large."" -- case ErrTopicAuthorizationFailed: -- return ""kafka server: The client is not authorized to access this topic."" -- case ErrGroupAuthorizationFailed: -- return ""kafka server: The client is not authorized to access this group."" -- case ErrClusterAuthorizationFailed: -- return ""kafka server: The client is not authorized to send this request type."" -- case ErrInvalidTimestamp: -- return ""kafka server: The timestamp of the message is out of acceptable range."" -- case ErrUnsupportedSASLMechanism: -- return ""kafka server: The broker does not support the requested SASL mechanism."" -- case ErrIllegalSASLState: -- return ""kafka server: Request is not valid given the current SASL state."" -- case ErrUnsupportedVersion: -- return ""kafka server: The version of API is not supported."" -- case ErrTopicAlreadyExists: -- return ""kafka server: Topic with this name already exists."" -- case ErrInvalidPartitions: -- return ""kafka server: Number of partitions is invalid."" -- case ErrInvalidReplicationFactor: -- return ""kafka server: Replication-factor is invalid."" -- case ErrInvalidReplicaAssignment: -- return ""kafka server: Replica assignment is invalid."" -- case ErrInvalidConfig: -- return ""kafka server: Configuration is invalid."" -- case ErrNotController: -- return ""kafka server: This is not the correct controller for this cluster."" -- case ErrInvalidRequest: -- return ""kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. 
See the broker logs for more details."" -- case ErrUnsupportedForMessageFormat: -- return ""kafka server: The requested operation is not supported by the message format version."" -- case ErrPolicyViolation: -- return ""kafka server: Request parameters do not satisfy the configured policy."" -- case ErrOutOfOrderSequenceNumber: -- return ""kafka server: The broker received an out of order sequence number."" -- case ErrDuplicateSequenceNumber: -- return ""kafka server: The broker received a duplicate sequence number."" -- case ErrInvalidProducerEpoch: -- return ""kafka server: Producer attempted an operation with an old epoch."" -- case ErrInvalidTxnState: -- return ""kafka server: The producer attempted a transactional operation in an invalid state."" -- case ErrInvalidProducerIDMapping: -- return ""kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."" -- case ErrInvalidTransactionTimeout: -- return ""kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."" -- case ErrConcurrentTransactions: -- return ""kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."" -- case ErrTransactionCoordinatorFenced: -- return ""kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."" -- case ErrTransactionalIDAuthorizationFailed: -- return ""kafka server: Transactional ID authorization failed."" -- case ErrSecurityDisabled: -- return ""kafka server: Security features are disabled."" -- case ErrOperationNotAttempted: -- return ""kafka server: The broker did not attempt to execute this operation."" -- case ErrKafkaStorageError: -- return ""kafka server: Disk error when trying to access log file on the disk."" -- case ErrLogDirNotFound: -- return ""kafka server: The specified log directory is not found in the broker config."" -- case ErrSASLAuthenticationFailed: -- return ""kafka server: SASL Authentication failed."" -- case ErrUnknownProducerID: -- return ""kafka server: The broker could not locate the producer metadata associated with the Producer ID."" -- case ErrReassignmentInProgress: -- return ""kafka server: A partition reassignment is in progress."" -- } -- -- return fmt.Sprintf(""Unknown error, how did this happen? Error code = %d"", err) --} -diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go -deleted file mode 100644 -index 8c8e3a5afc83f..0000000000000 ---- a/vendor/github.com/Shopify/sarama/fetch_request.go -+++ /dev/null -@@ -1,170 +0,0 @@ --package sarama -- --type fetchRequestBlock struct { -- fetchOffset int64 -- maxBytes int32 --} -- --func (b *fetchRequestBlock) encode(pe packetEncoder) error { -- pe.putInt64(b.fetchOffset) -- pe.putInt32(b.maxBytes) -- return nil --} -- --func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { -- if b.fetchOffset, err = pd.getInt64(); err != nil { -- return err -- } -- if b.maxBytes, err = pd.getInt32(); err != nil { -- return err -- } -- return nil --} -- --// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See --// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. 
The KIP is at --// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes --type FetchRequest struct { -- MaxWaitTime int32 -- MinBytes int32 -- MaxBytes int32 -- Version int16 -- Isolation IsolationLevel -- blocks map[string]map[int32]*fetchRequestBlock --} -- --type IsolationLevel int8 -- --const ( -- ReadUncommitted IsolationLevel = 0 -- ReadCommitted IsolationLevel = 1 --) -- --func (r *FetchRequest) encode(pe packetEncoder) (err error) { -- pe.putInt32(-1) // replica ID is always -1 for clients -- pe.putInt32(r.MaxWaitTime) -- pe.putInt32(r.MinBytes) -- if r.Version >= 3 { -- pe.putInt32(r.MaxBytes) -- } -- if r.Version >= 4 { -- pe.putInt8(int8(r.Isolation)) -- } -- err = pe.putArrayLength(len(r.blocks)) -- if err != nil { -- return err -- } -- for topic, blocks := range r.blocks { -- err = pe.putString(topic) -- if err != nil { -- return err -- } -- err = pe.putArrayLength(len(blocks)) -- if err != nil { -- return err -- } -- for partition, block := range blocks { -- pe.putInt32(partition) -- err = block.encode(pe) -- if err != nil { -- return err -- } -- } -- } -- return nil --} -- --func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { -- r.Version = version -- if _, err = pd.getInt32(); err != nil { -- return err -- } -- if r.MaxWaitTime, err = pd.getInt32(); err != nil { -- return err -- } -- if r.MinBytes, err = pd.getInt32(); err != nil { -- return err -- } -- if r.Version >= 3 { -- if r.MaxBytes, err = pd.getInt32(); err != nil { -- return err -- } -- } -- if r.Version >= 4 { -- isolation, err := pd.getInt8() -- if err != nil { -- return err -- } -- r.Isolation = IsolationLevel(isolation) -- } -- topicCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if topicCount == 0 { -- return nil -- } -- r.blocks = make(map[string]map[int32]*fetchRequestBlock) -- for i := 0; i < topicCount; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- partitionCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- r.blocks[topic] = make(map[int32]*fetchRequestBlock) -- for j := 0; j < partitionCount; j++ { -- partition, err := pd.getInt32() -- if err != nil { -- return err -- } -- fetchBlock := &fetchRequestBlock{} -- if err = fetchBlock.decode(pd); err != nil { -- return err -- } -- r.blocks[topic][partition] = fetchBlock -- } -- } -- return nil --} -- --func (r *FetchRequest) key() int16 { -- return 1 --} -- --func (r *FetchRequest) version() int16 { -- return r.Version --} -- --func (r *FetchRequest) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_9_0_0 -- case 2: -- return V0_10_0_0 -- case 3: -- return V0_10_1_0 -- case 4: -- return V0_11_0_0 -- default: -- return minVersion -- } --} -- --func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { -- if r.blocks == nil { -- r.blocks = make(map[string]map[int32]*fetchRequestBlock) -- } -- -- if r.blocks[topic] == nil { -- r.blocks[topic] = make(map[int32]*fetchRequestBlock) -- } -- -- tmp := new(fetchRequestBlock) -- tmp.maxBytes = maxBytes -- tmp.fetchOffset = fetchOffset -- -- r.blocks[topic][partitionID] = tmp --} -diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go -deleted file mode 100644 -index 0e81ad89f434a..0000000000000 ---- a/vendor/github.com/Shopify/sarama/fetch_response.go -+++ /dev/null -@@ -1,385 +0,0 @@ --package sarama -- --import ( -- ""time"" --) -- 
--type AbortedTransaction struct { -- ProducerID int64 -- FirstOffset int64 --} -- --func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { -- if t.ProducerID, err = pd.getInt64(); err != nil { -- return err -- } -- -- if t.FirstOffset, err = pd.getInt64(); err != nil { -- return err -- } -- -- return nil --} -- --func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { -- pe.putInt64(t.ProducerID) -- pe.putInt64(t.FirstOffset) -- -- return nil --} -- --type FetchResponseBlock struct { -- Err KError -- HighWaterMarkOffset int64 -- LastStableOffset int64 -- AbortedTransactions []*AbortedTransaction -- Records *Records // deprecated: use FetchResponseBlock.Records -- RecordsSet []*Records -- Partial bool --} -- --func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- b.Err = KError(tmp) -- -- b.HighWaterMarkOffset, err = pd.getInt64() -- if err != nil { -- return err -- } -- -- if version >= 4 { -- b.LastStableOffset, err = pd.getInt64() -- if err != nil { -- return err -- } -- -- numTransact, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- if numTransact >= 0 { -- b.AbortedTransactions = make([]*AbortedTransaction, numTransact) -- } -- -- for i := 0; i < numTransact; i++ { -- transact := new(AbortedTransaction) -- if err = transact.decode(pd); err != nil { -- return err -- } -- b.AbortedTransactions[i] = transact -- } -- } -- -- recordsSize, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- recordsDecoder, err := pd.getSubset(int(recordsSize)) -- if err != nil { -- return err -- } -- -- b.RecordsSet = []*Records{} -- -- for recordsDecoder.remaining() > 0 { -- records := &Records{} -- if err := records.decode(recordsDecoder); err != nil { -- // If we have at least one decoded records, this is not an error -- if err == ErrInsufficientData { -- if len(b.RecordsSet) == 0 { -- b.Partial = true -- } -- break -- } -- return err -- } -- -- partial, err := records.isPartial() -- if err != nil { -- return err -- } -- -- // If we have at least one full records, we skip incomplete ones -- if partial && len(b.RecordsSet) > 0 { -- break -- } -- -- b.RecordsSet = append(b.RecordsSet, records) -- -- if b.Records == nil { -- b.Records = records -- } -- } -- -- return nil --} -- --func (b *FetchResponseBlock) numRecords() (int, error) { -- sum := 0 -- -- for _, records := range b.RecordsSet { -- count, err := records.numRecords() -- if err != nil { -- return 0, err -- } -- -- sum += count -- } -- -- return sum, nil --} -- --func (b *FetchResponseBlock) isPartial() (bool, error) { -- if b.Partial { -- return true, nil -- } -- -- if len(b.RecordsSet) == 1 { -- return b.RecordsSet[0].isPartial() -- } -- -- return false, nil --} -- --func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { -- pe.putInt16(int16(b.Err)) -- -- pe.putInt64(b.HighWaterMarkOffset) -- -- if version >= 4 { -- pe.putInt64(b.LastStableOffset) -- -- if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { -- return err -- } -- for _, transact := range b.AbortedTransactions { -- if err = transact.encode(pe); err != nil { -- return err -- } -- } -- } -- -- pe.push(&lengthField{}) -- for _, records := range b.RecordsSet { -- err = records.encode(pe) -- if err != nil { -- return err -- } -- } -- return pe.pop() --} -- --type FetchResponse struct { -- Blocks map[string]map[int32]*FetchResponseBlock -- ThrottleTime time.Duration -- Version int16 // v1 
requires 0.9+, v2 requires 0.10+ --} -- --func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { -- r.Version = version -- -- if r.Version >= 1 { -- throttle, err := pd.getInt32() -- if err != nil { -- return err -- } -- r.ThrottleTime = time.Duration(throttle) * time.Millisecond -- } -- -- numTopics, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) -- for i := 0; i < numTopics; i++ { -- name, err := pd.getString() -- if err != nil { -- return err -- } -- -- numBlocks, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) -- -- for j := 0; j < numBlocks; j++ { -- id, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- block := new(FetchResponseBlock) -- err = block.decode(pd, version) -- if err != nil { -- return err -- } -- r.Blocks[name][id] = block -- } -- } -- -- return nil --} -- --func (r *FetchResponse) encode(pe packetEncoder) (err error) { -- if r.Version >= 1 { -- pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) -- } -- -- err = pe.putArrayLength(len(r.Blocks)) -- if err != nil { -- return err -- } -- -- for topic, partitions := range r.Blocks { -- err = pe.putString(topic) -- if err != nil { -- return err -- } -- -- err = pe.putArrayLength(len(partitions)) -- if err != nil { -- return err -- } -- -- for id, block := range partitions { -- pe.putInt32(id) -- err = block.encode(pe, r.Version) -- if err != nil { -- return err -- } -- } -- -- } -- return nil --} -- --func (r *FetchResponse) key() int16 { -- return 1 --} -- --func (r *FetchResponse) version() int16 { -- return r.Version --} -- --func (r *FetchResponse) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_9_0_0 -- case 2: -- return V0_10_0_0 -- case 3: -- return V0_10_1_0 -- case 4: -- return V0_11_0_0 -- default: -- return minVersion -- } --} -- --func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { -- if r.Blocks == nil { -- return nil -- } -- -- if r.Blocks[topic] == nil { -- return nil -- } -- -- return r.Blocks[topic][partition] --} -- --func (r *FetchResponse) AddError(topic string, partition int32, err KError) { -- if r.Blocks == nil { -- r.Blocks = make(map[string]map[int32]*FetchResponseBlock) -- } -- partitions, ok := r.Blocks[topic] -- if !ok { -- partitions = make(map[int32]*FetchResponseBlock) -- r.Blocks[topic] = partitions -- } -- frb, ok := partitions[partition] -- if !ok { -- frb = new(FetchResponseBlock) -- partitions[partition] = frb -- } -- frb.Err = err --} -- --func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { -- if r.Blocks == nil { -- r.Blocks = make(map[string]map[int32]*FetchResponseBlock) -- } -- partitions, ok := r.Blocks[topic] -- if !ok { -- partitions = make(map[int32]*FetchResponseBlock) -- r.Blocks[topic] = partitions -- } -- frb, ok := partitions[partition] -- if !ok { -- frb = new(FetchResponseBlock) -- partitions[partition] = frb -- } -- -- return frb --} -- --func encodeKV(key, value Encoder) ([]byte, []byte) { -- var kb []byte -- var vb []byte -- if key != nil { -- kb, _ = key.Encode() -- } -- if value != nil { -- vb, _ = value.Encode() -- } -- -- return kb, vb --} -- --func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { -- frb := r.getOrCreateBlock(topic, partition) -- kb, vb := encodeKV(key, value) -- msg := 
&Message{Key: kb, Value: vb} -- msgBlock := &MessageBlock{Msg: msg, Offset: offset} -- if len(frb.RecordsSet) == 0 { -- records := newLegacyRecords(&MessageSet{}) -- frb.RecordsSet = []*Records{&records} -- } -- set := frb.RecordsSet[0].msgSet -- set.Messages = append(set.Messages, msgBlock) --} -- --func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { -- frb := r.getOrCreateBlock(topic, partition) -- kb, vb := encodeKV(key, value) -- rec := &Record{Key: kb, Value: vb, OffsetDelta: offset} -- if len(frb.RecordsSet) == 0 { -- records := newDefaultRecords(&RecordBatch{Version: 2}) -- frb.RecordsSet = []*Records{&records} -- } -- batch := frb.RecordsSet[0].recordBatch -- batch.addRecord(rec) --} -- --func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { -- frb := r.getOrCreateBlock(topic, partition) -- if len(frb.RecordsSet) == 0 { -- records := newDefaultRecords(&RecordBatch{Version: 2}) -- frb.RecordsSet = []*Records{&records} -- } -- batch := frb.RecordsSet[0].recordBatch -- batch.LastOffsetDelta = offset --} -- --func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { -- frb := r.getOrCreateBlock(topic, partition) -- frb.LastStableOffset = offset --} -diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go -deleted file mode 100644 -index ce49c4739727e..0000000000000 ---- a/vendor/github.com/Shopify/sarama/heartbeat_request.go -+++ /dev/null -@@ -1,47 +0,0 @@ --package sarama -- --type HeartbeatRequest struct { -- GroupId string -- GenerationId int32 -- MemberId string --} -- --func (r *HeartbeatRequest) encode(pe packetEncoder) error { -- if err := pe.putString(r.GroupId); err != nil { -- return err -- } -- -- pe.putInt32(r.GenerationId) -- -- if err := pe.putString(r.MemberId); err != nil { -- return err -- } -- -- return nil --} -- --func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { -- if r.GroupId, err = pd.getString(); err != nil { -- return -- } -- if r.GenerationId, err = pd.getInt32(); err != nil { -- return -- } -- if r.MemberId, err = pd.getString(); err != nil { -- return -- } -- -- return nil --} -- --func (r *HeartbeatRequest) key() int16 { -- return 12 --} -- --func (r *HeartbeatRequest) version() int16 { -- return 0 --} -- --func (r *HeartbeatRequest) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go -deleted file mode 100644 -index 766f5fdec6f3c..0000000000000 ---- a/vendor/github.com/Shopify/sarama/heartbeat_response.go -+++ /dev/null -@@ -1,32 +0,0 @@ --package sarama -- --type HeartbeatResponse struct { -- Err KError --} -- --func (r *HeartbeatResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- return nil --} -- --func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- r.Err = KError(kerr) -- -- return nil --} -- --func (r *HeartbeatResponse) key() int16 { -- return 12 --} -- --func (r *HeartbeatResponse) version() int16 { -- return 0 --} -- --func (r *HeartbeatResponse) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go -deleted file mode 100644 -index 
8ceb6c232554e..0000000000000 ---- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go -+++ /dev/null -@@ -1,43 +0,0 @@ --package sarama -- --import ""time"" -- --type InitProducerIDRequest struct { -- TransactionalID *string -- TransactionTimeout time.Duration --} -- --func (i *InitProducerIDRequest) encode(pe packetEncoder) error { -- if err := pe.putNullableString(i.TransactionalID); err != nil { -- return err -- } -- pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) -- -- return nil --} -- --func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { -- if i.TransactionalID, err = pd.getNullableString(); err != nil { -- return err -- } -- -- timeout, err := pd.getInt32() -- if err != nil { -- return err -- } -- i.TransactionTimeout = time.Duration(timeout) * time.Millisecond -- -- return nil --} -- --func (i *InitProducerIDRequest) key() int16 { -- return 22 --} -- --func (i *InitProducerIDRequest) version() int16 { -- return 0 --} -- --func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go -deleted file mode 100644 -index 1b32eb085b2b9..0000000000000 ---- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go -+++ /dev/null -@@ -1,55 +0,0 @@ --package sarama -- --import ""time"" -- --type InitProducerIDResponse struct { -- ThrottleTime time.Duration -- Err KError -- ProducerID int64 -- ProducerEpoch int16 --} -- --func (i *InitProducerIDResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) -- pe.putInt16(int16(i.Err)) -- pe.putInt64(i.ProducerID) -- pe.putInt16(i.ProducerEpoch) -- -- return nil --} -- --func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- i.Err = KError(kerr) -- -- if i.ProducerID, err = pd.getInt64(); err != nil { -- return err -- } -- -- if i.ProducerEpoch, err = pd.getInt16(); err != nil { -- return err -- } -- -- return nil --} -- --func (i *InitProducerIDResponse) key() int16 { -- return 22 --} -- --func (i *InitProducerIDResponse) version() int16 { -- return 0 --} -- --func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go -deleted file mode 100644 -index 3a7ba17122d33..0000000000000 ---- a/vendor/github.com/Shopify/sarama/join_group_request.go -+++ /dev/null -@@ -1,143 +0,0 @@ --package sarama -- --type GroupProtocol struct { -- Name string -- Metadata []byte --} -- --func (p *GroupProtocol) decode(pd packetDecoder) (err error) { -- p.Name, err = pd.getString() -- if err != nil { -- return err -- } -- p.Metadata, err = pd.getBytes() -- return err --} -- --func (p *GroupProtocol) encode(pe packetEncoder) (err error) { -- if err := pe.putString(p.Name); err != nil { -- return err -- } -- if err := pe.putBytes(p.Metadata); err != nil { -- return err -- } -- return nil --} -- --type JoinGroupRequest struct { -- GroupId string -- SessionTimeout int32 -- MemberId string -- ProtocolType string -- GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols -- OrderedGroupProtocols 
[]*GroupProtocol --} -- --func (r *JoinGroupRequest) encode(pe packetEncoder) error { -- if err := pe.putString(r.GroupId); err != nil { -- return err -- } -- pe.putInt32(r.SessionTimeout) -- if err := pe.putString(r.MemberId); err != nil { -- return err -- } -- if err := pe.putString(r.ProtocolType); err != nil { -- return err -- } -- -- if len(r.GroupProtocols) > 0 { -- if len(r.OrderedGroupProtocols) > 0 { -- return PacketDecodingError{""cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest""} -- } -- -- if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { -- return err -- } -- for name, metadata := range r.GroupProtocols { -- if err := pe.putString(name); err != nil { -- return err -- } -- if err := pe.putBytes(metadata); err != nil { -- return err -- } -- } -- } else { -- if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { -- return err -- } -- for _, protocol := range r.OrderedGroupProtocols { -- if err := protocol.encode(pe); err != nil { -- return err -- } -- } -- } -- -- return nil --} -- --func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { -- if r.GroupId, err = pd.getString(); err != nil { -- return -- } -- -- if r.SessionTimeout, err = pd.getInt32(); err != nil { -- return -- } -- -- if r.MemberId, err = pd.getString(); err != nil { -- return -- } -- -- if r.ProtocolType, err = pd.getString(); err != nil { -- return -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if n == 0 { -- return nil -- } -- -- r.GroupProtocols = make(map[string][]byte) -- for i := 0; i < n; i++ { -- protocol := &GroupProtocol{} -- if err := protocol.decode(pd); err != nil { -- return err -- } -- r.GroupProtocols[protocol.Name] = protocol.Metadata -- r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) -- } -- -- return nil --} -- --func (r *JoinGroupRequest) key() int16 { -- return 11 --} -- --func (r *JoinGroupRequest) version() int16 { -- return 0 --} -- --func (r *JoinGroupRequest) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -- --func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { -- r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ -- Name: name, -- Metadata: metadata, -- }) --} -- --func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { -- bin, err := encode(metadata, nil) -- if err != nil { -- return err -- } -- -- r.AddGroupProtocol(name, bin) -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go -deleted file mode 100644 -index 6d35fe36494e7..0000000000000 ---- a/vendor/github.com/Shopify/sarama/join_group_response.go -+++ /dev/null -@@ -1,115 +0,0 @@ --package sarama -- --type JoinGroupResponse struct { -- Err KError -- GenerationId int32 -- GroupProtocol string -- LeaderId string -- MemberId string -- Members map[string][]byte --} -- --func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { -- members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) -- for id, bin := range r.Members { -- meta := new(ConsumerGroupMemberMetadata) -- if err := decode(bin, meta); err != nil { -- return nil, err -- } -- members[id] = *meta -- } -- return members, nil --} -- --func (r *JoinGroupResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- pe.putInt32(r.GenerationId) -- -- if err := 
pe.putString(r.GroupProtocol); err != nil { -- return err -- } -- if err := pe.putString(r.LeaderId); err != nil { -- return err -- } -- if err := pe.putString(r.MemberId); err != nil { -- return err -- } -- -- if err := pe.putArrayLength(len(r.Members)); err != nil { -- return err -- } -- -- for memberId, memberMetadata := range r.Members { -- if err := pe.putString(memberId); err != nil { -- return err -- } -- -- if err := pe.putBytes(memberMetadata); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- -- r.Err = KError(kerr) -- -- if r.GenerationId, err = pd.getInt32(); err != nil { -- return -- } -- -- if r.GroupProtocol, err = pd.getString(); err != nil { -- return -- } -- -- if r.LeaderId, err = pd.getString(); err != nil { -- return -- } -- -- if r.MemberId, err = pd.getString(); err != nil { -- return -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if n == 0 { -- return nil -- } -- -- r.Members = make(map[string][]byte) -- for i := 0; i < n; i++ { -- memberId, err := pd.getString() -- if err != nil { -- return err -- } -- -- memberMetadata, err := pd.getBytes() -- if err != nil { -- return err -- } -- -- r.Members[memberId] = memberMetadata -- } -- -- return nil --} -- --func (r *JoinGroupResponse) key() int16 { -- return 11 --} -- --func (r *JoinGroupResponse) version() int16 { -- return 0 --} -- --func (r *JoinGroupResponse) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go -deleted file mode 100644 -index e177427482fd3..0000000000000 ---- a/vendor/github.com/Shopify/sarama/leave_group_request.go -+++ /dev/null -@@ -1,40 +0,0 @@ --package sarama -- --type LeaveGroupRequest struct { -- GroupId string -- MemberId string --} -- --func (r *LeaveGroupRequest) encode(pe packetEncoder) error { -- if err := pe.putString(r.GroupId); err != nil { -- return err -- } -- if err := pe.putString(r.MemberId); err != nil { -- return err -- } -- -- return nil --} -- --func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { -- if r.GroupId, err = pd.getString(); err != nil { -- return -- } -- if r.MemberId, err = pd.getString(); err != nil { -- return -- } -- -- return nil --} -- --func (r *LeaveGroupRequest) key() int16 { -- return 13 --} -- --func (r *LeaveGroupRequest) version() int16 { -- return 0 --} -- --func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go -deleted file mode 100644 -index d60c626da01cc..0000000000000 ---- a/vendor/github.com/Shopify/sarama/leave_group_response.go -+++ /dev/null -@@ -1,32 +0,0 @@ --package sarama -- --type LeaveGroupResponse struct { -- Err KError --} -- --func (r *LeaveGroupResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- return nil --} -- --func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- r.Err = KError(kerr) -- -- return nil --} -- --func (r *LeaveGroupResponse) key() int16 { -- return 13 --} -- --func (r *LeaveGroupResponse) version() int16 { -- return 0 --} -- --func (r *LeaveGroupResponse) requiredVersion() 
KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go -deleted file mode 100644 -index 576b1a6f6f8d0..0000000000000 ---- a/vendor/github.com/Shopify/sarama/length_field.go -+++ /dev/null -@@ -1,69 +0,0 @@ --package sarama -- --import ""encoding/binary"" -- --// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. --type lengthField struct { -- startOffset int --} -- --func (l *lengthField) saveOffset(in int) { -- l.startOffset = in --} -- --func (l *lengthField) reserveLength() int { -- return 4 --} -- --func (l *lengthField) run(curOffset int, buf []byte) error { -- binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) -- return nil --} -- --func (l *lengthField) check(curOffset int, buf []byte) error { -- if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { -- return PacketDecodingError{""length field invalid""} -- } -- -- return nil --} -- --type varintLengthField struct { -- startOffset int -- length int64 --} -- --func (l *varintLengthField) decode(pd packetDecoder) error { -- var err error -- l.length, err = pd.getVarint() -- return err --} -- --func (l *varintLengthField) saveOffset(in int) { -- l.startOffset = in --} -- --func (l *varintLengthField) adjustLength(currOffset int) int { -- oldFieldSize := l.reserveLength() -- l.length = int64(currOffset - l.startOffset - oldFieldSize) -- -- return l.reserveLength() - oldFieldSize --} -- --func (l *varintLengthField) reserveLength() int { -- var tmp [binary.MaxVarintLen64]byte -- return binary.PutVarint(tmp[:], l.length) --} -- --func (l *varintLengthField) run(curOffset int, buf []byte) error { -- binary.PutVarint(buf[l.startOffset:], l.length) -- return nil --} -- --func (l *varintLengthField) check(curOffset int, buf []byte) error { -- if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { -- return PacketDecodingError{""length field invalid""} -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go -deleted file mode 100644 -index 3b16abf7fa817..0000000000000 ---- a/vendor/github.com/Shopify/sarama/list_groups_request.go -+++ /dev/null -@@ -1,24 +0,0 @@ --package sarama -- --type ListGroupsRequest struct { --} -- --func (r *ListGroupsRequest) encode(pe packetEncoder) error { -- return nil --} -- --func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { -- return nil --} -- --func (r *ListGroupsRequest) key() int16 { -- return 16 --} -- --func (r *ListGroupsRequest) version() int16 { -- return 0 --} -- --func (r *ListGroupsRequest) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go -deleted file mode 100644 -index 56115d4c75ad1..0000000000000 ---- a/vendor/github.com/Shopify/sarama/list_groups_response.go -+++ /dev/null -@@ -1,69 +0,0 @@ --package sarama -- --type ListGroupsResponse struct { -- Err KError -- Groups map[string]string --} -- --func (r *ListGroupsResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- -- if err := pe.putArrayLength(len(r.Groups)); err != nil { -- return err -- } -- for groupId, protocolType := range r.Groups { -- if err := pe.putString(groupId); err != nil { -- return err -- } -- if err := pe.putString(protocolType); 
err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- -- r.Err = KError(kerr) -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if n == 0 { -- return nil -- } -- -- r.Groups = make(map[string]string) -- for i := 0; i < n; i++ { -- groupId, err := pd.getString() -- if err != nil { -- return err -- } -- protocolType, err := pd.getString() -- if err != nil { -- return err -- } -- -- r.Groups[groupId] = protocolType -- } -- -- return nil --} -- --func (r *ListGroupsResponse) key() int16 { -- return 16 --} -- --func (r *ListGroupsResponse) version() int16 { -- return 0 --} -- --func (r *ListGroupsResponse) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go -deleted file mode 100644 -index bd5650bbc07f9..0000000000000 ---- a/vendor/github.com/Shopify/sarama/message.go -+++ /dev/null -@@ -1,200 +0,0 @@ --package sarama -- --import ( -- ""bytes"" -- ""compress/gzip"" -- ""fmt"" -- ""io/ioutil"" -- ""time"" -- -- ""github.com/eapache/go-xerial-snappy"" -- ""github.com/pierrec/lz4"" --) -- --// CompressionCodec represents the various compression codecs recognized by Kafka in messages. --type CompressionCodec int8 -- --// only the last two bits are really used --const compressionCodecMask int8 = 0x03 -- --const ( -- CompressionNone CompressionCodec = 0 -- CompressionGZIP CompressionCodec = 1 -- CompressionSnappy CompressionCodec = 2 -- CompressionLZ4 CompressionCodec = 3 --) -- --type Message struct { -- Codec CompressionCodec // codec used to compress the message contents -- Key []byte // the message key, may be nil -- Value []byte // the message contents -- Set *MessageSet // the message set a message might wrap -- Version int8 // v1 requires Kafka 0.10 -- Timestamp time.Time // the timestamp of the message (version 1+ only) -- -- compressedCache []byte -- compressedSize int // used for computing the compression ratio metrics --} -- --func (m *Message) encode(pe packetEncoder) error { -- pe.push(newCRC32Field(crcIEEE)) -- -- pe.putInt8(m.Version) -- -- attributes := int8(m.Codec) & compressionCodecMask -- pe.putInt8(attributes) -- -- if m.Version >= 1 { -- if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { -- return err -- } -- } -- -- err := pe.putBytes(m.Key) -- if err != nil { -- return err -- } -- -- var payload []byte -- -- if m.compressedCache != nil { -- payload = m.compressedCache -- m.compressedCache = nil -- } else if m.Value != nil { -- switch m.Codec { -- case CompressionNone: -- payload = m.Value -- case CompressionGZIP: -- var buf bytes.Buffer -- writer := gzip.NewWriter(&buf) -- if _, err = writer.Write(m.Value); err != nil { -- return err -- } -- if err = writer.Close(); err != nil { -- return err -- } -- m.compressedCache = buf.Bytes() -- payload = m.compressedCache -- case CompressionSnappy: -- tmp := snappy.Encode(m.Value) -- m.compressedCache = tmp -- payload = m.compressedCache -- case CompressionLZ4: -- var buf bytes.Buffer -- writer := lz4.NewWriter(&buf) -- if _, err = writer.Write(m.Value); err != nil { -- return err -- } -- if err = writer.Close(); err != nil { -- return err -- } -- m.compressedCache = buf.Bytes() -- payload = m.compressedCache -- -- default: -- return PacketEncodingError{fmt.Sprintf(""unsupported compression codec (%d)"", m.Codec)} -- } -- // Keep in mind 
the compressed payload size for metric gathering -- m.compressedSize = len(payload) -- } -- -- if err = pe.putBytes(payload); err != nil { -- return err -- } -- -- return pe.pop() --} -- --func (m *Message) decode(pd packetDecoder) (err error) { -- err = pd.push(newCRC32Field(crcIEEE)) -- if err != nil { -- return err -- } -- -- m.Version, err = pd.getInt8() -- if err != nil { -- return err -- } -- -- if m.Version > 1 { -- return PacketDecodingError{fmt.Sprintf(""unknown magic byte (%v)"", m.Version)} -- } -- -- attribute, err := pd.getInt8() -- if err != nil { -- return err -- } -- m.Codec = CompressionCodec(attribute & compressionCodecMask) -- -- if m.Version == 1 { -- if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { -- return err -- } -- } -- -- m.Key, err = pd.getBytes() -- if err != nil { -- return err -- } -- -- m.Value, err = pd.getBytes() -- if err != nil { -- return err -- } -- -- // Required for deep equal assertion during tests but might be useful -- // for future metrics about the compression ratio in fetch requests -- m.compressedSize = len(m.Value) -- -- switch m.Codec { -- case CompressionNone: -- // nothing to do -- case CompressionGZIP: -- if m.Value == nil { -- break -- } -- reader, err := gzip.NewReader(bytes.NewReader(m.Value)) -- if err != nil { -- return err -- } -- if m.Value, err = ioutil.ReadAll(reader); err != nil { -- return err -- } -- if err := m.decodeSet(); err != nil { -- return err -- } -- case CompressionSnappy: -- if m.Value == nil { -- break -- } -- if m.Value, err = snappy.Decode(m.Value); err != nil { -- return err -- } -- if err := m.decodeSet(); err != nil { -- return err -- } -- case CompressionLZ4: -- if m.Value == nil { -- break -- } -- reader := lz4.NewReader(bytes.NewReader(m.Value)) -- if m.Value, err = ioutil.ReadAll(reader); err != nil { -- return err -- } -- if err := m.decodeSet(); err != nil { -- return err -- } -- -- default: -- return PacketDecodingError{fmt.Sprintf(""invalid compression specified (%d)"", m.Codec)} -- } -- -- return pd.pop() --} -- --// decodes a message set from a previousy encoded bulk-message --func (m *Message) decodeSet() (err error) { -- pd := realDecoder{raw: m.Value} -- m.Set = &MessageSet{} -- return m.Set.decode(&pd) --} -diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go -deleted file mode 100644 -index 27db52fdf1f74..0000000000000 ---- a/vendor/github.com/Shopify/sarama/message_set.go -+++ /dev/null -@@ -1,102 +0,0 @@ --package sarama -- --type MessageBlock struct { -- Offset int64 -- Msg *Message --} -- --// Messages convenience helper which returns either all the --// messages that are wrapped in this block --func (msb *MessageBlock) Messages() []*MessageBlock { -- if msb.Msg.Set != nil { -- return msb.Msg.Set.Messages -- } -- return []*MessageBlock{msb} --} -- --func (msb *MessageBlock) encode(pe packetEncoder) error { -- pe.putInt64(msb.Offset) -- pe.push(&lengthField{}) -- err := msb.Msg.encode(pe) -- if err != nil { -- return err -- } -- return pe.pop() --} -- --func (msb *MessageBlock) decode(pd packetDecoder) (err error) { -- if msb.Offset, err = pd.getInt64(); err != nil { -- return err -- } -- -- if err = pd.push(&lengthField{}); err != nil { -- return err -- } -- -- msb.Msg = new(Message) -- if err = msb.Msg.decode(pd); err != nil { -- return err -- } -- -- if err = pd.pop(); err != nil { -- return err -- } -- -- return nil --} -- --type MessageSet struct { -- PartialTrailingMessage bool // whether the set on the wire contained 
an incomplete trailing MessageBlock -- Messages []*MessageBlock --} -- --func (ms *MessageSet) encode(pe packetEncoder) error { -- for i := range ms.Messages { -- err := ms.Messages[i].encode(pe) -- if err != nil { -- return err -- } -- } -- return nil --} -- --func (ms *MessageSet) decode(pd packetDecoder) (err error) { -- ms.Messages = nil -- -- for pd.remaining() > 0 { -- magic, err := magicValue(pd) -- if err != nil { -- if err == ErrInsufficientData { -- ms.PartialTrailingMessage = true -- return nil -- } -- return err -- } -- -- if magic > 1 { -- return nil -- } -- -- msb := new(MessageBlock) -- err = msb.decode(pd) -- switch err { -- case nil: -- ms.Messages = append(ms.Messages, msb) -- case ErrInsufficientData: -- // As an optimization the server is allowed to return a partial message at the -- // end of the message set. Clients should handle this case. So we just ignore such things. -- ms.PartialTrailingMessage = true -- return nil -- default: -- return err -- } -- } -- -- return nil --} -- --func (ms *MessageSet) addMessage(msg *Message) { -- block := new(MessageBlock) -- block.Msg = msg -- ms.Messages = append(ms.Messages, block) --} -diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go -deleted file mode 100644 -index 9a26b55fd0327..0000000000000 ---- a/vendor/github.com/Shopify/sarama/metadata_request.go -+++ /dev/null -@@ -1,52 +0,0 @@ --package sarama -- --type MetadataRequest struct { -- Topics []string --} -- --func (r *MetadataRequest) encode(pe packetEncoder) error { -- err := pe.putArrayLength(len(r.Topics)) -- if err != nil { -- return err -- } -- -- for i := range r.Topics { -- err = pe.putString(r.Topics[i]) -- if err != nil { -- return err -- } -- } -- return nil --} -- --func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { -- topicCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if topicCount == 0 { -- return nil -- } -- -- r.Topics = make([]string, topicCount) -- for i := range r.Topics { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- r.Topics[i] = topic -- } -- return nil --} -- --func (r *MetadataRequest) key() int16 { -- return 3 --} -- --func (r *MetadataRequest) version() int16 { -- return 0 --} -- --func (r *MetadataRequest) requiredVersion() KafkaVersion { -- return minVersion --} -diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go -deleted file mode 100644 -index f9d6a4271edc4..0000000000000 ---- a/vendor/github.com/Shopify/sarama/metadata_response.go -+++ /dev/null -@@ -1,239 +0,0 @@ --package sarama -- --type PartitionMetadata struct { -- Err KError -- ID int32 -- Leader int32 -- Replicas []int32 -- Isr []int32 --} -- --func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- pm.Err = KError(tmp) -- -- pm.ID, err = pd.getInt32() -- if err != nil { -- return err -- } -- -- pm.Leader, err = pd.getInt32() -- if err != nil { -- return err -- } -- -- pm.Replicas, err = pd.getInt32Array() -- if err != nil { -- return err -- } -- -- pm.Isr, err = pd.getInt32Array() -- if err != nil { -- return err -- } -- -- return nil --} -- --func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { -- pe.putInt16(int16(pm.Err)) -- pe.putInt32(pm.ID) -- pe.putInt32(pm.Leader) -- -- err = pe.putInt32Array(pm.Replicas) -- if err != nil { -- return err -- } -- -- err = 
pe.putInt32Array(pm.Isr) -- if err != nil { -- return err -- } -- -- return nil --} -- --type TopicMetadata struct { -- Err KError -- Name string -- Partitions []*PartitionMetadata --} -- --func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- tm.Err = KError(tmp) -- -- tm.Name, err = pd.getString() -- if err != nil { -- return err -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- tm.Partitions = make([]*PartitionMetadata, n) -- for i := 0; i < n; i++ { -- tm.Partitions[i] = new(PartitionMetadata) -- err = tm.Partitions[i].decode(pd) -- if err != nil { -- return err -- } -- } -- -- return nil --} -- --func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { -- pe.putInt16(int16(tm.Err)) -- -- err = pe.putString(tm.Name) -- if err != nil { -- return err -- } -- -- err = pe.putArrayLength(len(tm.Partitions)) -- if err != nil { -- return err -- } -- -- for _, pm := range tm.Partitions { -- err = pm.encode(pe) -- if err != nil { -- return err -- } -- } -- -- return nil --} -- --type MetadataResponse struct { -- Brokers []*Broker -- Topics []*TopicMetadata --} -- --func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Brokers = make([]*Broker, n) -- for i := 0; i < n; i++ { -- r.Brokers[i] = new(Broker) -- err = r.Brokers[i].decode(pd) -- if err != nil { -- return err -- } -- } -- -- n, err = pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Topics = make([]*TopicMetadata, n) -- for i := 0; i < n; i++ { -- r.Topics[i] = new(TopicMetadata) -- err = r.Topics[i].decode(pd) -- if err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *MetadataResponse) encode(pe packetEncoder) error { -- err := pe.putArrayLength(len(r.Brokers)) -- if err != nil { -- return err -- } -- for _, broker := range r.Brokers { -- err = broker.encode(pe) -- if err != nil { -- return err -- } -- } -- -- err = pe.putArrayLength(len(r.Topics)) -- if err != nil { -- return err -- } -- for _, tm := range r.Topics { -- err = tm.encode(pe) -- if err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *MetadataResponse) key() int16 { -- return 3 --} -- --func (r *MetadataResponse) version() int16 { -- return 0 --} -- --func (r *MetadataResponse) requiredVersion() KafkaVersion { -- return minVersion --} -- --// testing API -- --func (r *MetadataResponse) AddBroker(addr string, id int32) { -- r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) --} -- --func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { -- var tmatch *TopicMetadata -- -- for _, tm := range r.Topics { -- if tm.Name == topic { -- tmatch = tm -- goto foundTopic -- } -- } -- -- tmatch = new(TopicMetadata) -- tmatch.Name = topic -- r.Topics = append(r.Topics, tmatch) -- --foundTopic: -- -- tmatch.Err = err -- return tmatch --} -- --func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { -- tmatch := r.AddTopic(topic, ErrNoError) -- var pmatch *PartitionMetadata -- -- for _, pm := range tmatch.Partitions { -- if pm.ID == partition { -- pmatch = pm -- goto foundPartition -- } -- } -- -- pmatch = new(PartitionMetadata) -- pmatch.ID = partition -- tmatch.Partitions = append(tmatch.Partitions, pmatch) -- --foundPartition: -- -- pmatch.Leader = brokerID -- pmatch.Replicas = replicas -- pmatch.Isr = isr -- 
pmatch.Err = err -- --} -diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go -deleted file mode 100644 -index 4869708e9449a..0000000000000 ---- a/vendor/github.com/Shopify/sarama/metrics.go -+++ /dev/null -@@ -1,51 +0,0 @@ --package sarama -- --import ( -- ""fmt"" -- ""strings"" -- -- ""github.com/rcrowley/go-metrics"" --) -- --// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: --// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, --// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. --// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38 --const ( -- metricsReservoirSize = 1028 -- metricsAlphaFactor = 0.015 --) -- --func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram { -- return r.GetOrRegister(name, func() metrics.Histogram { -- return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor)) -- }).(metrics.Histogram) --} -- --func getMetricNameForBroker(name string, broker *Broker) string { -- // Use broker id like the Java client as it does not contain '.' or ':' characters that -- // can be interpreted as special character by monitoring tool (e.g. Graphite) -- return fmt.Sprintf(name+""-for-broker-%d"", broker.ID()) --} -- --func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter { -- return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r) --} -- --func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { -- return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) --} -- --func getMetricNameForTopic(name string, topic string) string { -- // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy -- // cf. KAFKA-1902 and KAFKA-2337 -- return fmt.Sprintf(name+""-for-topic-%s"", strings.Replace(topic, ""."", ""_"", -1)) --} -- --func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter { -- return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r) --} -- --func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram { -- return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r) --} -diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go -deleted file mode 100644 -index 55ef1e2920f6c..0000000000000 ---- a/vendor/github.com/Shopify/sarama/mockbroker.go -+++ /dev/null -@@ -1,330 +0,0 @@ --package sarama -- --import ( -- ""bytes"" -- ""encoding/binary"" -- ""fmt"" -- ""io"" -- ""net"" -- ""reflect"" -- ""strconv"" -- ""sync"" -- ""time"" -- -- ""github.com/davecgh/go-spew/spew"" --) -- --const ( -- expectationTimeout = 500 * time.Millisecond --) -- --type requestHandlerFunc func(req *request) (res encoder) -- --// RequestNotifierFunc is invoked when a mock broker processes a request successfully --// and will provides the number of bytes read and written. --type RequestNotifierFunc func(bytesRead, bytesWritten int) -- --// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed --// to facilitate testing of higher level or specialized consumers and producers --// built on top of Sarama. 
Note that it does not 'mimic' the Kafka API protocol, --// but rather provides a facility to do that. It takes care of the TCP --// transport, request unmarshaling, response marshaling, and makes it the test --// writer responsibility to program correct according to the Kafka API protocol --// MockBroker behaviour. --// --// MockBroker is implemented as a TCP server listening on a kernel-selected --// localhost port that can accept many connections. It reads Kafka requests --// from that connection and returns responses programmed by the SetHandlerByMap --// function. If a MockBroker receives a request that it has no programmed --// response for, then it returns nothing and the request times out. --// --// A set of MockRequest builders to define mappings used by MockBroker is --// provided by Sarama. But users can develop MockRequests of their own and use --// them along with or instead of the standard ones. --// --// When running tests with MockBroker it is strongly recommended to specify --// a timeout to `go test` so that if the broker hangs waiting for a response, --// the test panics. --// --// It is not necessary to prefix message length or correlation ID to your --// response bytes, the server does that automatically as a convenience. --type MockBroker struct { -- brokerID int32 -- port int32 -- closing chan none -- stopper chan none -- expectations chan encoder -- listener net.Listener -- t TestReporter -- latency time.Duration -- handler requestHandlerFunc -- notifier RequestNotifierFunc -- history []RequestResponse -- lock sync.Mutex --} -- --// RequestResponse represents a Request/Response pair processed by MockBroker. --type RequestResponse struct { -- Request protocolBody -- Response encoder --} -- --// SetLatency makes broker pause for the specified period every time before --// replying. --func (b *MockBroker) SetLatency(latency time.Duration) { -- b.latency = latency --} -- --// SetHandlerByMap defines mapping of Request types to MockResponses. When a --// request is received by the broker, it looks up the request type in the map --// and uses the found MockResponse instance to generate an appropriate reply. --// If the request type is not found in the map then nothing is sent. --func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { -- b.setHandler(func(req *request) (res encoder) { -- reqTypeName := reflect.TypeOf(req.body).Elem().Name() -- mockResponse := handlerMap[reqTypeName] -- if mockResponse == nil { -- return nil -- } -- return mockResponse.For(req.body) -- }) --} -- --// SetNotifier set a function that will get invoked whenever a request has been --// processed successfully and will provide the number of bytes read and written --func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) { -- b.lock.Lock() -- b.notifier = notifier -- b.lock.Unlock() --} -- --// BrokerID returns broker ID assigned to the broker. --func (b *MockBroker) BrokerID() int32 { -- return b.brokerID --} -- --// History returns a slice of RequestResponse pairs in the order they were --// processed by the broker. Note that in case of multiple connections to the --// broker the order expected by a test can be different from the order recorded --// in the history, unless some synchronization is implemented in the test. 
--func (b *MockBroker) History() []RequestResponse { -- b.lock.Lock() -- history := make([]RequestResponse, len(b.history)) -- copy(history, b.history) -- b.lock.Unlock() -- return history --} -- --// Port returns the TCP port number the broker is listening for requests on. --func (b *MockBroker) Port() int32 { -- return b.port --} -- --// Addr returns the broker connection string in the form ""
:"". --func (b *MockBroker) Addr() string { -- return b.listener.Addr().String() --} -- --// Close terminates the broker blocking until it stops internal goroutines and --// releases all resources. --func (b *MockBroker) Close() { -- close(b.expectations) -- if len(b.expectations) > 0 { -- buf := bytes.NewBufferString(fmt.Sprintf(""mockbroker/%d: not all expectations were satisfied! Still waiting on:\n"", b.BrokerID())) -- for e := range b.expectations { -- _, _ = buf.WriteString(spew.Sdump(e)) -- } -- b.t.Error(buf.String()) -- } -- close(b.closing) -- <-b.stopper --} -- --// setHandler sets the specified function as the request handler. Whenever --// a mock broker reads a request from the wire it passes the request to the --// function and sends back whatever the handler function returns. --func (b *MockBroker) setHandler(handler requestHandlerFunc) { -- b.lock.Lock() -- b.handler = handler -- b.lock.Unlock() --} -- --func (b *MockBroker) serverLoop() { -- defer close(b.stopper) -- var err error -- var conn net.Conn -- -- go func() { -- <-b.closing -- err := b.listener.Close() -- if err != nil { -- b.t.Error(err) -- } -- }() -- -- wg := &sync.WaitGroup{} -- i := 0 -- for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { -- wg.Add(1) -- go b.handleRequests(conn, i, wg) -- i++ -- } -- wg.Wait() -- Logger.Printf(""*** mockbroker/%d: listener closed, err=%v"", b.BrokerID(), err) --} -- --func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { -- defer wg.Done() -- defer func() { -- _ = conn.Close() -- }() -- Logger.Printf(""*** mockbroker/%d/%d: connection opened"", b.BrokerID(), idx) -- var err error -- -- abort := make(chan none) -- defer close(abort) -- go func() { -- select { -- case <-b.closing: -- _ = conn.Close() -- case <-abort: -- } -- }() -- -- resHeader := make([]byte, 8) -- for { -- req, bytesRead, err := decodeRequest(conn) -- if err != nil { -- Logger.Printf(""*** mockbroker/%d/%d: invalid request: err=%+v, %+v"", b.brokerID, idx, err, spew.Sdump(req)) -- b.serverError(err) -- break -- } -- -- if b.latency > 0 { -- time.Sleep(b.latency) -- } -- -- b.lock.Lock() -- res := b.handler(req) -- b.history = append(b.history, RequestResponse{req.body, res}) -- b.lock.Unlock() -- -- if res == nil { -- Logger.Printf(""*** mockbroker/%d/%d: ignored %v"", b.brokerID, idx, spew.Sdump(req)) -- continue -- } -- Logger.Printf(""*** mockbroker/%d/%d: served %v -> %v"", b.brokerID, idx, req, res) -- -- encodedRes, err := encode(res, nil) -- if err != nil { -- b.serverError(err) -- break -- } -- if len(encodedRes) == 0 { -- b.lock.Lock() -- if b.notifier != nil { -- b.notifier(bytesRead, 0) -- } -- b.lock.Unlock() -- continue -- } -- -- binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) -- binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) -- if _, err = conn.Write(resHeader); err != nil { -- b.serverError(err) -- break -- } -- if _, err = conn.Write(encodedRes); err != nil { -- b.serverError(err) -- break -- } -- -- b.lock.Lock() -- if b.notifier != nil { -- b.notifier(bytesRead, len(resHeader)+len(encodedRes)) -- } -- b.lock.Unlock() -- } -- Logger.Printf(""*** mockbroker/%d/%d: connection closed, err=%v"", b.BrokerID(), idx, err) --} -- --func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { -- select { -- case res, ok := <-b.expectations: -- if !ok { -- return nil -- } -- return res -- case <-time.After(expectationTimeout): -- return nil -- } --} -- --func (b *MockBroker) serverError(err 
error) { -- isConnectionClosedError := false -- if _, ok := err.(*net.OpError); ok { -- isConnectionClosedError = true -- } else if err == io.EOF { -- isConnectionClosedError = true -- } else if err.Error() == ""use of closed network connection"" { -- isConnectionClosedError = true -- } -- -- if isConnectionClosedError { -- return -- } -- -- b.t.Errorf(err.Error()) --} -- --// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the --// test framework and a channel of responses to use. If an error occurs it is --// simply logged to the TestReporter and the broker exits. --func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { -- return NewMockBrokerAddr(t, brokerID, ""localhost:0"") --} -- --// NewMockBrokerAddr behaves like newMockBroker but listens on the address you give --// it rather than just some ephemeral port. --func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { -- listener, err := net.Listen(""tcp"", addr) -- if err != nil { -- t.Fatal(err) -- } -- return NewMockBrokerListener(t, brokerID, listener) --} -- --// NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified. --func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { -- var err error -- -- broker := &MockBroker{ -- closing: make(chan none), -- stopper: make(chan none), -- t: t, -- brokerID: brokerID, -- expectations: make(chan encoder, 512), -- listener: listener, -- } -- broker.handler = broker.defaultRequestHandler -- -- Logger.Printf(""*** mockbroker/%d listening on %s\n"", brokerID, broker.listener.Addr().String()) -- _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) -- if err != nil { -- t.Fatal(err) -- } -- tmp, err := strconv.ParseInt(portStr, 10, 32) -- if err != nil { -- t.Fatal(err) -- } -- broker.port = int32(tmp) -- -- go broker.serverLoop() -- -- return broker --} -- --func (b *MockBroker) Returns(e encoder) { -- b.expectations <- e --} -diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go -deleted file mode 100644 -index f79a9d5e9b42a..0000000000000 ---- a/vendor/github.com/Shopify/sarama/mockresponses.go -+++ /dev/null -@@ -1,477 +0,0 @@ --package sarama -- --import ( -- ""fmt"" --) -- --// TestReporter has methods matching go's testing.T to avoid importing --// `testing` in the main part of the library. --type TestReporter interface { -- Error(...interface{}) -- Errorf(string, ...interface{}) -- Fatal(...interface{}) -- Fatalf(string, ...interface{}) --} -- --// MockResponse is a response builder interface it defines one method that --// allows generating a response based on a request body. MockResponses are used --// to program behavior of MockBroker in tests. --type MockResponse interface { -- For(reqBody versionedDecoder) (res encoder) --} -- --// MockWrapper is a mock response builder that returns a particular concrete --// response regardless of the actual request passed to the `For` method. --type MockWrapper struct { -- res encoder --} -- --func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { -- return mw.res --} -- --func NewMockWrapper(res encoder) *MockWrapper { -- return &MockWrapper{res: res} --} -- --// MockSequence is a mock response builder that is created from a sequence of --// concrete responses. Every time when a `MockBroker` calls its `For` method --// the next response from the sequence is returned. 
When the end of the --// sequence is reached the last element from the sequence is returned. --type MockSequence struct { -- responses []MockResponse --} -- --func NewMockSequence(responses ...interface{}) *MockSequence { -- ms := &MockSequence{} -- ms.responses = make([]MockResponse, len(responses)) -- for i, res := range responses { -- switch res := res.(type) { -- case MockResponse: -- ms.responses[i] = res -- case encoder: -- ms.responses[i] = NewMockWrapper(res) -- default: -- panic(fmt.Sprintf(""Unexpected response type: %T"", res)) -- } -- } -- return ms --} -- --func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { -- res = mc.responses[0].For(reqBody) -- if len(mc.responses) > 1 { -- mc.responses = mc.responses[1:] -- } -- return res --} -- --// MockMetadataResponse is a `MetadataResponse` builder. --type MockMetadataResponse struct { -- leaders map[string]map[int32]int32 -- brokers map[string]int32 -- t TestReporter --} -- --func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { -- return &MockMetadataResponse{ -- leaders: make(map[string]map[int32]int32), -- brokers: make(map[string]int32), -- t: t, -- } --} -- --func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { -- partitions := mmr.leaders[topic] -- if partitions == nil { -- partitions = make(map[int32]int32) -- mmr.leaders[topic] = partitions -- } -- partitions[partition] = brokerID -- return mmr --} -- --func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { -- mmr.brokers[addr] = brokerID -- return mmr --} -- --func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { -- metadataRequest := reqBody.(*MetadataRequest) -- metadataResponse := &MetadataResponse{} -- for addr, brokerID := range mmr.brokers { -- metadataResponse.AddBroker(addr, brokerID) -- } -- if len(metadataRequest.Topics) == 0 { -- for topic, partitions := range mmr.leaders { -- for partition, brokerID := range partitions { -- metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) -- } -- } -- return metadataResponse -- } -- for _, topic := range metadataRequest.Topics { -- for partition, brokerID := range mmr.leaders[topic] { -- metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) -- } -- } -- return metadataResponse --} -- --// MockOffsetResponse is an `OffsetResponse` builder. 
--type MockOffsetResponse struct { -- offsets map[string]map[int32]map[int64]int64 -- t TestReporter -- version int16 --} -- --func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { -- return &MockOffsetResponse{ -- offsets: make(map[string]map[int32]map[int64]int64), -- t: t, -- } --} -- --func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { -- mor.version = version -- return mor --} -- --func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { -- partitions := mor.offsets[topic] -- if partitions == nil { -- partitions = make(map[int32]map[int64]int64) -- mor.offsets[topic] = partitions -- } -- times := partitions[partition] -- if times == nil { -- times = make(map[int64]int64) -- partitions[partition] = times -- } -- times[time] = offset -- return mor --} -- --func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { -- offsetRequest := reqBody.(*OffsetRequest) -- offsetResponse := &OffsetResponse{Version: mor.version} -- for topic, partitions := range offsetRequest.blocks { -- for partition, block := range partitions { -- offset := mor.getOffset(topic, partition, block.time) -- offsetResponse.AddTopicPartition(topic, partition, offset) -- } -- } -- return offsetResponse --} -- --func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { -- partitions := mor.offsets[topic] -- if partitions == nil { -- mor.t.Errorf(""missing topic: %s"", topic) -- } -- times := partitions[partition] -- if times == nil { -- mor.t.Errorf(""missing partition: %d"", partition) -- } -- offset, ok := times[time] -- if !ok { -- mor.t.Errorf(""missing time: %d"", time) -- } -- return offset --} -- --// MockFetchResponse is a `FetchResponse` builder. 
--type MockFetchResponse struct { -- messages map[string]map[int32]map[int64]Encoder -- highWaterMarks map[string]map[int32]int64 -- t TestReporter -- batchSize int -- version int16 --} -- --func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { -- return &MockFetchResponse{ -- messages: make(map[string]map[int32]map[int64]Encoder), -- highWaterMarks: make(map[string]map[int32]int64), -- t: t, -- batchSize: batchSize, -- } --} -- --func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { -- mfr.version = version -- return mfr --} -- --func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { -- partitions := mfr.messages[topic] -- if partitions == nil { -- partitions = make(map[int32]map[int64]Encoder) -- mfr.messages[topic] = partitions -- } -- messages := partitions[partition] -- if messages == nil { -- messages = make(map[int64]Encoder) -- partitions[partition] = messages -- } -- messages[offset] = msg -- return mfr --} -- --func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { -- partitions := mfr.highWaterMarks[topic] -- if partitions == nil { -- partitions = make(map[int32]int64) -- mfr.highWaterMarks[topic] = partitions -- } -- partitions[partition] = offset -- return mfr --} -- --func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { -- fetchRequest := reqBody.(*FetchRequest) -- res := &FetchResponse{ -- Version: mfr.version, -- } -- for topic, partitions := range fetchRequest.blocks { -- for partition, block := range partitions { -- initialOffset := block.fetchOffset -- offset := initialOffset -- maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) -- for i := 0; i < mfr.batchSize && offset < maxOffset; { -- msg := mfr.getMessage(topic, partition, offset) -- if msg != nil { -- res.AddMessage(topic, partition, nil, msg, offset) -- i++ -- } -- offset++ -- } -- fb := res.GetBlock(topic, partition) -- if fb == nil { -- res.AddError(topic, partition, ErrNoError) -- fb = res.GetBlock(topic, partition) -- } -- fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) -- } -- } -- return res --} -- --func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { -- partitions := mfr.messages[topic] -- if partitions == nil { -- return nil -- } -- messages := partitions[partition] -- if messages == nil { -- return nil -- } -- return messages[offset] --} -- --func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { -- partitions := mfr.messages[topic] -- if partitions == nil { -- return 0 -- } -- messages := partitions[partition] -- if messages == nil { -- return 0 -- } -- return len(messages) --} -- --func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { -- partitions := mfr.highWaterMarks[topic] -- if partitions == nil { -- return 0 -- } -- return partitions[partition] --} -- --// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. 
--type MockConsumerMetadataResponse struct { -- coordinators map[string]interface{} -- t TestReporter --} -- --func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { -- return &MockConsumerMetadataResponse{ -- coordinators: make(map[string]interface{}), -- t: t, -- } --} -- --func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { -- mr.coordinators[group] = broker -- return mr --} -- --func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { -- mr.coordinators[group] = kerror -- return mr --} -- --func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { -- req := reqBody.(*ConsumerMetadataRequest) -- group := req.ConsumerGroup -- res := &ConsumerMetadataResponse{} -- v := mr.coordinators[group] -- switch v := v.(type) { -- case *MockBroker: -- res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} -- case KError: -- res.Err = v -- } -- return res --} -- --// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. --type MockOffsetCommitResponse struct { -- errors map[string]map[string]map[int32]KError -- t TestReporter --} -- --func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { -- return &MockOffsetCommitResponse{t: t} --} -- --func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { -- if mr.errors == nil { -- mr.errors = make(map[string]map[string]map[int32]KError) -- } -- topics := mr.errors[group] -- if topics == nil { -- topics = make(map[string]map[int32]KError) -- mr.errors[group] = topics -- } -- partitions := topics[topic] -- if partitions == nil { -- partitions = make(map[int32]KError) -- topics[topic] = partitions -- } -- partitions[partition] = kerror -- return mr --} -- --func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { -- req := reqBody.(*OffsetCommitRequest) -- group := req.ConsumerGroup -- res := &OffsetCommitResponse{} -- for topic, partitions := range req.blocks { -- for partition := range partitions { -- res.AddError(topic, partition, mr.getError(group, topic, partition)) -- } -- } -- return res --} -- --func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { -- topics := mr.errors[group] -- if topics == nil { -- return ErrNoError -- } -- partitions := topics[topic] -- if partitions == nil { -- return ErrNoError -- } -- kerror, ok := partitions[partition] -- if !ok { -- return ErrNoError -- } -- return kerror --} -- --// MockProduceResponse is a `ProduceResponse` builder. 
--type MockProduceResponse struct { -- version int16 -- errors map[string]map[int32]KError -- t TestReporter --} -- --func NewMockProduceResponse(t TestReporter) *MockProduceResponse { -- return &MockProduceResponse{t: t} --} -- --func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { -- mr.version = version -- return mr --} -- --func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { -- if mr.errors == nil { -- mr.errors = make(map[string]map[int32]KError) -- } -- partitions := mr.errors[topic] -- if partitions == nil { -- partitions = make(map[int32]KError) -- mr.errors[topic] = partitions -- } -- partitions[partition] = kerror -- return mr --} -- --func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { -- req := reqBody.(*ProduceRequest) -- res := &ProduceResponse{ -- Version: mr.version, -- } -- for topic, partitions := range req.records { -- for partition := range partitions { -- res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) -- } -- } -- return res --} -- --func (mr *MockProduceResponse) getError(topic string, partition int32) KError { -- partitions := mr.errors[topic] -- if partitions == nil { -- return ErrNoError -- } -- kerror, ok := partitions[partition] -- if !ok { -- return ErrNoError -- } -- return kerror --} -- --// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. --type MockOffsetFetchResponse struct { -- offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock -- t TestReporter --} -- --func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { -- return &MockOffsetFetchResponse{t: t} --} -- --func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { -- if mr.offsets == nil { -- mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) -- } -- topics := mr.offsets[group] -- if topics == nil { -- topics = make(map[string]map[int32]*OffsetFetchResponseBlock) -- mr.offsets[group] = topics -- } -- partitions := topics[topic] -- if partitions == nil { -- partitions = make(map[int32]*OffsetFetchResponseBlock) -- topics[topic] = partitions -- } -- partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} -- return mr --} -- --func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { -- req := reqBody.(*OffsetFetchRequest) -- group := req.ConsumerGroup -- res := &OffsetFetchResponse{} -- for topic, partitions := range mr.offsets[group] { -- for partition, block := range partitions { -- res.AddBlock(topic, partition, block) -- } -- } -- return res --} -diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go -deleted file mode 100644 -index b21ea634b0243..0000000000000 ---- a/vendor/github.com/Shopify/sarama/offset_commit_request.go -+++ /dev/null -@@ -1,190 +0,0 @@ --package sarama -- --// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which --// tells the broker to set the timestamp to the time at which the request was received. --// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. --const ReceiveTime int64 = -1 -- --// GroupGenerationUndefined is a special value for the group generation field of --// Offset Commit Requests that should be used when a consumer group does not rely --// on Kafka for partition management. 
--const GroupGenerationUndefined = -1 -- --type offsetCommitRequestBlock struct { -- offset int64 -- timestamp int64 -- metadata string --} -- --func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { -- pe.putInt64(b.offset) -- if version == 1 { -- pe.putInt64(b.timestamp) -- } else if b.timestamp != 0 { -- Logger.Println(""Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored"") -- } -- -- return pe.putString(b.metadata) --} -- --func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { -- if b.offset, err = pd.getInt64(); err != nil { -- return err -- } -- if version == 1 { -- if b.timestamp, err = pd.getInt64(); err != nil { -- return err -- } -- } -- b.metadata, err = pd.getString() -- return err --} -- --type OffsetCommitRequest struct { -- ConsumerGroup string -- ConsumerGroupGeneration int32 // v1 or later -- ConsumerID string // v1 or later -- RetentionTime int64 // v2 or later -- -- // Version can be: -- // - 0 (kafka 0.8.1 and later) -- // - 1 (kafka 0.8.2 and later) -- // - 2 (kafka 0.9.0 and later) -- Version int16 -- blocks map[string]map[int32]*offsetCommitRequestBlock --} -- --func (r *OffsetCommitRequest) encode(pe packetEncoder) error { -- if r.Version < 0 || r.Version > 2 { -- return PacketEncodingError{""invalid or unsupported OffsetCommitRequest version field""} -- } -- -- if err := pe.putString(r.ConsumerGroup); err != nil { -- return err -- } -- -- if r.Version >= 1 { -- pe.putInt32(r.ConsumerGroupGeneration) -- if err := pe.putString(r.ConsumerID); err != nil { -- return err -- } -- } else { -- if r.ConsumerGroupGeneration != 0 { -- Logger.Println(""Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored"") -- } -- if r.ConsumerID != """" { -- Logger.Println(""Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored"") -- } -- } -- -- if r.Version >= 2 { -- pe.putInt64(r.RetentionTime) -- } else if r.RetentionTime != 0 { -- Logger.Println(""Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored"") -- } -- -- if err := pe.putArrayLength(len(r.blocks)); err != nil { -- return err -- } -- for topic, partitions := range r.blocks { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putArrayLength(len(partitions)); err != nil { -- return err -- } -- for partition, block := range partitions { -- pe.putInt32(partition) -- if err := block.encode(pe, r.Version); err != nil { -- return err -- } -- } -- } -- return nil --} -- --func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { -- r.Version = version -- -- if r.ConsumerGroup, err = pd.getString(); err != nil { -- return err -- } -- -- if r.Version >= 1 { -- if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { -- return err -- } -- if r.ConsumerID, err = pd.getString(); err != nil { -- return err -- } -- } -- -- if r.Version >= 2 { -- if r.RetentionTime, err = pd.getInt64(); err != nil { -- return err -- } -- } -- -- topicCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if topicCount == 0 { -- return nil -- } -- r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) -- for i := 0; i < topicCount; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- partitionCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) -- for j := 0; j < partitionCount; 
j++ { -- partition, err := pd.getInt32() -- if err != nil { -- return err -- } -- block := &offsetCommitRequestBlock{} -- if err := block.decode(pd, r.Version); err != nil { -- return err -- } -- r.blocks[topic][partition] = block -- } -- } -- return nil --} -- --func (r *OffsetCommitRequest) key() int16 { -- return 8 --} -- --func (r *OffsetCommitRequest) version() int16 { -- return r.Version --} -- --func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_8_2_0 -- case 2: -- return V0_9_0_0 -- default: -- return minVersion -- } --} -- --func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { -- if r.blocks == nil { -- r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) -- } -- -- if r.blocks[topic] == nil { -- r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) -- } -- -- r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} --} -diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go -deleted file mode 100644 -index 7f277e7753a13..0000000000000 ---- a/vendor/github.com/Shopify/sarama/offset_commit_response.go -+++ /dev/null -@@ -1,85 +0,0 @@ --package sarama -- --type OffsetCommitResponse struct { -- Errors map[string]map[int32]KError --} -- --func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { -- if r.Errors == nil { -- r.Errors = make(map[string]map[int32]KError) -- } -- partitions := r.Errors[topic] -- if partitions == nil { -- partitions = make(map[int32]KError) -- r.Errors[topic] = partitions -- } -- partitions[partition] = kerror --} -- --func (r *OffsetCommitResponse) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(r.Errors)); err != nil { -- return err -- } -- for topic, partitions := range r.Errors { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putArrayLength(len(partitions)); err != nil { -- return err -- } -- for partition, kerror := range partitions { -- pe.putInt32(partition) -- pe.putInt16(int16(kerror)) -- } -- } -- return nil --} -- --func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { -- numTopics, err := pd.getArrayLength() -- if err != nil || numTopics == 0 { -- return err -- } -- -- r.Errors = make(map[string]map[int32]KError, numTopics) -- for i := 0; i < numTopics; i++ { -- name, err := pd.getString() -- if err != nil { -- return err -- } -- -- numErrors, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Errors[name] = make(map[int32]KError, numErrors) -- -- for j := 0; j < numErrors; j++ { -- id, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- r.Errors[name][id] = KError(tmp) -- } -- } -- -- return nil --} -- --func (r *OffsetCommitResponse) key() int16 { -- return 8 --} -- --func (r *OffsetCommitResponse) version() int16 { -- return 0 --} -- --func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { -- return minVersion --} -diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go -deleted file mode 100644 -index b19fe79ba7aa0..0000000000000 ---- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go -+++ /dev/null -@@ -1,81 +0,0 @@ --package sarama -- --type OffsetFetchRequest struct { -- ConsumerGroup string 
-- Version int16 -- partitions map[string][]int32 --} -- --func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { -- if r.Version < 0 || r.Version > 1 { -- return PacketEncodingError{""invalid or unsupported OffsetFetchRequest version field""} -- } -- -- if err = pe.putString(r.ConsumerGroup); err != nil { -- return err -- } -- if err = pe.putArrayLength(len(r.partitions)); err != nil { -- return err -- } -- for topic, partitions := range r.partitions { -- if err = pe.putString(topic); err != nil { -- return err -- } -- if err = pe.putInt32Array(partitions); err != nil { -- return err -- } -- } -- return nil --} -- --func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { -- r.Version = version -- if r.ConsumerGroup, err = pd.getString(); err != nil { -- return err -- } -- partitionCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if partitionCount == 0 { -- return nil -- } -- r.partitions = make(map[string][]int32) -- for i := 0; i < partitionCount; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- partitions, err := pd.getInt32Array() -- if err != nil { -- return err -- } -- r.partitions[topic] = partitions -- } -- return nil --} -- --func (r *OffsetFetchRequest) key() int16 { -- return 9 --} -- --func (r *OffsetFetchRequest) version() int16 { -- return r.Version --} -- --func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_8_2_0 -- default: -- return minVersion -- } --} -- --func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { -- if r.partitions == nil { -- r.partitions = make(map[string][]int32) -- } -- -- r.partitions[topic] = append(r.partitions[topic], partitionID) --} -diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go -deleted file mode 100644 -index 323220eac9769..0000000000000 ---- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go -+++ /dev/null -@@ -1,143 +0,0 @@ --package sarama -- --type OffsetFetchResponseBlock struct { -- Offset int64 -- Metadata string -- Err KError --} -- --func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { -- b.Offset, err = pd.getInt64() -- if err != nil { -- return err -- } -- -- b.Metadata, err = pd.getString() -- if err != nil { -- return err -- } -- -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- b.Err = KError(tmp) -- -- return nil --} -- --func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { -- pe.putInt64(b.Offset) -- -- err = pe.putString(b.Metadata) -- if err != nil { -- return err -- } -- -- pe.putInt16(int16(b.Err)) -- -- return nil --} -- --type OffsetFetchResponse struct { -- Blocks map[string]map[int32]*OffsetFetchResponseBlock --} -- --func (r *OffsetFetchResponse) encode(pe packetEncoder) error { -- if err := pe.putArrayLength(len(r.Blocks)); err != nil { -- return err -- } -- for topic, partitions := range r.Blocks { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putArrayLength(len(partitions)); err != nil { -- return err -- } -- for partition, block := range partitions { -- pe.putInt32(partition) -- if err := block.encode(pe); err != nil { -- return err -- } -- } -- } -- return nil --} -- --func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { -- numTopics, err := pd.getArrayLength() -- if err != nil || numTopics == 0 { -- return err -- } -- -- 
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) -- for i := 0; i < numTopics; i++ { -- name, err := pd.getString() -- if err != nil { -- return err -- } -- -- numBlocks, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- if numBlocks == 0 { -- r.Blocks[name] = nil -- continue -- } -- r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) -- -- for j := 0; j < numBlocks; j++ { -- id, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- block := new(OffsetFetchResponseBlock) -- err = block.decode(pd) -- if err != nil { -- return err -- } -- r.Blocks[name][id] = block -- } -- } -- -- return nil --} -- --func (r *OffsetFetchResponse) key() int16 { -- return 9 --} -- --func (r *OffsetFetchResponse) version() int16 { -- return 0 --} -- --func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { -- return minVersion --} -- --func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { -- if r.Blocks == nil { -- return nil -- } -- -- if r.Blocks[topic] == nil { -- return nil -- } -- -- return r.Blocks[topic][partition] --} -- --func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { -- if r.Blocks == nil { -- r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) -- } -- partitions := r.Blocks[topic] -- if partitions == nil { -- partitions = make(map[int32]*OffsetFetchResponseBlock) -- r.Blocks[topic] = partitions -- } -- partitions[partition] = block --} -diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go -deleted file mode 100644 -index 6c01f959e99fa..0000000000000 ---- a/vendor/github.com/Shopify/sarama/offset_manager.go -+++ /dev/null -@@ -1,560 +0,0 @@ --package sarama -- --import ( -- ""sync"" -- ""time"" --) -- --// Offset Manager -- --// OffsetManager uses Kafka to store and fetch consumed partition offsets. --type OffsetManager interface { -- // ManagePartition creates a PartitionOffsetManager on the given topic/partition. -- // It will return an error if this OffsetManager is already managing the given -- // topic/partition. -- ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) -- -- // Close stops the OffsetManager from managing offsets. It is required to call -- // this function before an OffsetManager object passes out of scope, as it -- // will otherwise leak memory. You must call this after all the -- // PartitionOffsetManagers are closed. -- Close() error --} -- --type offsetManager struct { -- client Client -- conf *Config -- group string -- -- lock sync.Mutex -- poms map[string]map[int32]*partitionOffsetManager -- boms map[*Broker]*brokerOffsetManager --} -- --// NewOffsetManagerFromClient creates a new OffsetManager from the given client. --// It is still necessary to call Close() on the underlying client when finished with the partition manager. 
--func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { -- // Check that we are not dealing with a closed Client before processing any other arguments -- if client.Closed() { -- return nil, ErrClosedClient -- } -- -- om := &offsetManager{ -- client: client, -- conf: client.Config(), -- group: group, -- poms: make(map[string]map[int32]*partitionOffsetManager), -- boms: make(map[*Broker]*brokerOffsetManager), -- } -- -- return om, nil --} -- --func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { -- pom, err := om.newPartitionOffsetManager(topic, partition) -- if err != nil { -- return nil, err -- } -- -- om.lock.Lock() -- defer om.lock.Unlock() -- -- topicManagers := om.poms[topic] -- if topicManagers == nil { -- topicManagers = make(map[int32]*partitionOffsetManager) -- om.poms[topic] = topicManagers -- } -- -- if topicManagers[partition] != nil { -- return nil, ConfigurationError(""That topic/partition is already being managed"") -- } -- -- topicManagers[partition] = pom -- return pom, nil --} -- --func (om *offsetManager) Close() error { -- return nil --} -- --func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { -- om.lock.Lock() -- defer om.lock.Unlock() -- -- bom := om.boms[broker] -- if bom == nil { -- bom = om.newBrokerOffsetManager(broker) -- om.boms[broker] = bom -- } -- -- bom.refs++ -- -- return bom --} -- --func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { -- om.lock.Lock() -- defer om.lock.Unlock() -- -- bom.refs-- -- -- if bom.refs == 0 { -- close(bom.updateSubscriptions) -- if om.boms[bom.broker] == bom { -- delete(om.boms, bom.broker) -- } -- } --} -- --func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { -- om.lock.Lock() -- defer om.lock.Unlock() -- -- delete(om.boms, bom.broker) --} -- --func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { -- om.lock.Lock() -- defer om.lock.Unlock() -- -- delete(om.poms[pom.topic], pom.partition) -- if len(om.poms[pom.topic]) == 0 { -- delete(om.poms, pom.topic) -- } --} -- --// Partition Offset Manager -- --// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() --// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes --// out of scope. --type PartitionOffsetManager interface { -- // NextOffset returns the next offset that should be consumed for the managed -- // partition, accompanied by metadata which can be used to reconstruct the state -- // of the partition consumer when it resumes. NextOffset() will return -- // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset -- // was committed for this partition yet. -- NextOffset() (int64, string) -- -- // MarkOffset marks the provided offset, alongside a metadata string -- // that represents the state of the partition consumer at that point in time. The -- // metadata string can be used by another consumer to restore that state, so it -- // can resume consumption. -- // -- // To follow upstream conventions, you are expected to mark the offset of the -- // next message to read, not the last message read. Thus, when calling `MarkOffset` -- // you should typically add one to the offset of the last consumed message. 
-- // -- // Note: calling MarkOffset does not necessarily commit the offset to the backend -- // store immediately for efficiency reasons, and it may never be committed if -- // your application crashes. This means that you may end up processing the same -- // message twice, and your processing should ideally be idempotent. -- MarkOffset(offset int64, metadata string) -- -- // ResetOffset resets to the provided offset, alongside a metadata string that -- // represents the state of the partition consumer at that point in time. Reset -- // acts as a counterpart to MarkOffset, the difference being that it allows to -- // reset an offset to an earlier or smaller value, where MarkOffset only -- // allows incrementing the offset. cf MarkOffset for more details. -- ResetOffset(offset int64, metadata string) -- -- // Errors returns a read channel of errors that occur during offset management, if -- // enabled. By default, errors are logged and not returned over this channel. If -- // you want to implement any custom error handling, set your config's -- // Consumer.Return.Errors setting to true, and read from this channel. -- Errors() <-chan *ConsumerError -- -- // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will -- // return immediately, after which you should wait until the 'errors' channel has -- // been drained and closed. It is required to call this function, or Close before -- // a consumer object passes out of scope, as it will otherwise leak memory. You -- // must call this before calling Close on the underlying client. -- AsyncClose() -- -- // Close stops the PartitionOffsetManager from managing offsets. It is required to -- // call this function (or AsyncClose) before a PartitionOffsetManager object -- // passes out of scope, as it will otherwise leak memory. You must call this -- // before calling Close on the underlying client. 
-- Close() error --} -- --type partitionOffsetManager struct { -- parent *offsetManager -- topic string -- partition int32 -- -- lock sync.Mutex -- offset int64 -- metadata string -- dirty bool -- clean sync.Cond -- broker *brokerOffsetManager -- -- errors chan *ConsumerError -- rebalance chan none -- dying chan none --} -- --func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { -- pom := &partitionOffsetManager{ -- parent: om, -- topic: topic, -- partition: partition, -- errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), -- rebalance: make(chan none, 1), -- dying: make(chan none), -- } -- pom.clean.L = &pom.lock -- -- if err := pom.selectBroker(); err != nil { -- return nil, err -- } -- -- if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil { -- return nil, err -- } -- -- pom.broker.updateSubscriptions <- pom -- -- go withRecover(pom.mainLoop) -- -- return pom, nil --} -- --func (pom *partitionOffsetManager) mainLoop() { -- for { -- select { -- case <-pom.rebalance: -- if err := pom.selectBroker(); err != nil { -- pom.handleError(err) -- pom.rebalance <- none{} -- } else { -- pom.broker.updateSubscriptions <- pom -- } -- case <-pom.dying: -- if pom.broker != nil { -- select { -- case <-pom.rebalance: -- case pom.broker.updateSubscriptions <- pom: -- } -- pom.parent.unrefBrokerOffsetManager(pom.broker) -- } -- pom.parent.abandonPartitionOffsetManager(pom) -- close(pom.errors) -- return -- } -- } --} -- --func (pom *partitionOffsetManager) selectBroker() error { -- if pom.broker != nil { -- pom.parent.unrefBrokerOffsetManager(pom.broker) -- pom.broker = nil -- } -- -- var broker *Broker -- var err error -- -- if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil { -- return err -- } -- -- if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil { -- return err -- } -- -- pom.broker = pom.parent.refBrokerOffsetManager(broker) -- return nil --} -- --func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { -- request := new(OffsetFetchRequest) -- request.Version = 1 -- request.ConsumerGroup = pom.parent.group -- request.AddPartition(pom.topic, pom.partition) -- -- response, err := pom.broker.broker.FetchOffset(request) -- if err != nil { -- return err -- } -- -- block := response.GetBlock(pom.topic, pom.partition) -- if block == nil { -- return ErrIncompleteResponse -- } -- -- switch block.Err { -- case ErrNoError: -- pom.offset = block.Offset -- pom.metadata = block.Metadata -- return nil -- case ErrNotCoordinatorForConsumer: -- if retries <= 0 { -- return block.Err -- } -- if err := pom.selectBroker(); err != nil { -- return err -- } -- return pom.fetchInitialOffset(retries - 1) -- case ErrOffsetsLoadInProgress: -- if retries <= 0 { -- return block.Err -- } -- time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) -- return pom.fetchInitialOffset(retries - 1) -- default: -- return block.Err -- } --} -- --func (pom *partitionOffsetManager) handleError(err error) { -- cErr := &ConsumerError{ -- Topic: pom.topic, -- Partition: pom.partition, -- Err: err, -- } -- -- if pom.parent.conf.Consumer.Return.Errors { -- pom.errors <- cErr -- } else { -- Logger.Println(cErr) -- } --} -- --func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { -- return pom.errors --} -- --func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { -- pom.lock.Lock() -- defer pom.lock.Unlock() -- -- if offset > pom.offset { -- pom.offset = 
offset -- pom.metadata = metadata -- pom.dirty = true -- } --} -- --func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { -- pom.lock.Lock() -- defer pom.lock.Unlock() -- -- if offset <= pom.offset { -- pom.offset = offset -- pom.metadata = metadata -- pom.dirty = true -- } --} -- --func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { -- pom.lock.Lock() -- defer pom.lock.Unlock() -- -- if pom.offset == offset && pom.metadata == metadata { -- pom.dirty = false -- pom.clean.Signal() -- } --} -- --func (pom *partitionOffsetManager) NextOffset() (int64, string) { -- pom.lock.Lock() -- defer pom.lock.Unlock() -- -- if pom.offset >= 0 { -- return pom.offset, pom.metadata -- } -- -- return pom.parent.conf.Consumer.Offsets.Initial, """" --} -- --func (pom *partitionOffsetManager) AsyncClose() { -- go func() { -- pom.lock.Lock() -- defer pom.lock.Unlock() -- -- for pom.dirty { -- pom.clean.Wait() -- } -- -- close(pom.dying) -- }() --} -- --func (pom *partitionOffsetManager) Close() error { -- pom.AsyncClose() -- -- var errors ConsumerErrors -- for err := range pom.errors { -- errors = append(errors, err) -- } -- -- if len(errors) > 0 { -- return errors -- } -- return nil --} -- --// Broker Offset Manager -- --type brokerOffsetManager struct { -- parent *offsetManager -- broker *Broker -- timer *time.Ticker -- updateSubscriptions chan *partitionOffsetManager -- subscriptions map[*partitionOffsetManager]none -- refs int --} -- --func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { -- bom := &brokerOffsetManager{ -- parent: om, -- broker: broker, -- timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), -- updateSubscriptions: make(chan *partitionOffsetManager), -- subscriptions: make(map[*partitionOffsetManager]none), -- } -- -- go withRecover(bom.mainLoop) -- -- return bom --} -- --func (bom *brokerOffsetManager) mainLoop() { -- for { -- select { -- case <-bom.timer.C: -- if len(bom.subscriptions) > 0 { -- bom.flushToBroker() -- } -- case s, ok := <-bom.updateSubscriptions: -- if !ok { -- bom.timer.Stop() -- return -- } -- if _, ok := bom.subscriptions[s]; ok { -- delete(bom.subscriptions, s) -- } else { -- bom.subscriptions[s] = none{} -- } -- } -- } --} -- --func (bom *brokerOffsetManager) flushToBroker() { -- request := bom.constructRequest() -- if request == nil { -- return -- } -- -- response, err := bom.broker.CommitOffset(request) -- -- if err != nil { -- bom.abort(err) -- return -- } -- -- for s := range bom.subscriptions { -- if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { -- continue -- } -- -- var err KError -- var ok bool -- -- if response.Errors[s.topic] == nil { -- s.handleError(ErrIncompleteResponse) -- delete(bom.subscriptions, s) -- s.rebalance <- none{} -- continue -- } -- if err, ok = response.Errors[s.topic][s.partition]; !ok { -- s.handleError(ErrIncompleteResponse) -- delete(bom.subscriptions, s) -- s.rebalance <- none{} -- continue -- } -- -- switch err { -- case ErrNoError: -- block := request.blocks[s.topic][s.partition] -- s.updateCommitted(block.offset, block.metadata) -- case ErrNotLeaderForPartition, ErrLeaderNotAvailable, -- ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: -- // not a critical error, we just need to redispatch -- delete(bom.subscriptions, s) -- s.rebalance <- none{} -- case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: -- // nothing we can do about this, just tell the user and carry on -- 
s.handleError(err) -- case ErrOffsetsLoadInProgress: -- // nothing wrong but we didn't commit, we'll get it next time round -- break -- case ErrUnknownTopicOrPartition: -- // let the user know *and* try redispatching - if topic-auto-create is -- // enabled, redispatching should trigger a metadata request and create the -- // topic; if not then re-dispatching won't help, but we've let the user -- // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) -- fallthrough -- default: -- // dunno, tell the user and try redispatching -- s.handleError(err) -- delete(bom.subscriptions, s) -- s.rebalance <- none{} -- } -- } --} -- --func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { -- var r *OffsetCommitRequest -- var perPartitionTimestamp int64 -- if bom.parent.conf.Consumer.Offsets.Retention == 0 { -- perPartitionTimestamp = ReceiveTime -- r = &OffsetCommitRequest{ -- Version: 1, -- ConsumerGroup: bom.parent.group, -- ConsumerGroupGeneration: GroupGenerationUndefined, -- } -- } else { -- r = &OffsetCommitRequest{ -- Version: 2, -- RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), -- ConsumerGroup: bom.parent.group, -- ConsumerGroupGeneration: GroupGenerationUndefined, -- } -- -- } -- -- for s := range bom.subscriptions { -- s.lock.Lock() -- if s.dirty { -- r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) -- } -- s.lock.Unlock() -- } -- -- if len(r.blocks) > 0 { -- return r -- } -- -- return nil --} -- --func (bom *brokerOffsetManager) abort(err error) { -- _ = bom.broker.Close() // we don't care about the error this might return, we already have one -- bom.parent.abandonBroker(bom) -- -- for pom := range bom.subscriptions { -- pom.handleError(err) -- pom.rebalance <- none{} -- } -- -- for s := range bom.updateSubscriptions { -- if _, ok := bom.subscriptions[s]; !ok { -- s.handleError(err) -- s.rebalance <- none{} -- } -- } -- -- bom.subscriptions = make(map[*partitionOffsetManager]none) --} -diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go -deleted file mode 100644 -index 6c269601647cd..0000000000000 ---- a/vendor/github.com/Shopify/sarama/offset_request.go -+++ /dev/null -@@ -1,132 +0,0 @@ --package sarama -- --type offsetRequestBlock struct { -- time int64 -- maxOffsets int32 // Only used in version 0 --} -- --func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { -- pe.putInt64(int64(b.time)) -- if version == 0 { -- pe.putInt32(b.maxOffsets) -- } -- -- return nil --} -- --func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { -- if b.time, err = pd.getInt64(); err != nil { -- return err -- } -- if version == 0 { -- if b.maxOffsets, err = pd.getInt32(); err != nil { -- return err -- } -- } -- return nil --} -- --type OffsetRequest struct { -- Version int16 -- blocks map[string]map[int32]*offsetRequestBlock --} -- --func (r *OffsetRequest) encode(pe packetEncoder) error { -- pe.putInt32(-1) // replica ID is always -1 for clients -- err := pe.putArrayLength(len(r.blocks)) -- if err != nil { -- return err -- } -- for topic, partitions := range r.blocks { -- err = pe.putString(topic) -- if err != nil { -- return err -- } -- err = pe.putArrayLength(len(partitions)) -- if err != nil { -- return err -- } -- for partition, block := range partitions { -- pe.putInt32(partition) -- if err = block.encode(pe, r.Version); err != nil { -- return err -- } -- } -- } -- return nil 
--} -- --func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { -- r.Version = version -- -- // Ignore replica ID -- if _, err := pd.getInt32(); err != nil { -- return err -- } -- blockCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if blockCount == 0 { -- return nil -- } -- r.blocks = make(map[string]map[int32]*offsetRequestBlock) -- for i := 0; i < blockCount; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- partitionCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- r.blocks[topic] = make(map[int32]*offsetRequestBlock) -- for j := 0; j < partitionCount; j++ { -- partition, err := pd.getInt32() -- if err != nil { -- return err -- } -- block := &offsetRequestBlock{} -- if err := block.decode(pd, version); err != nil { -- return err -- } -- r.blocks[topic][partition] = block -- } -- } -- return nil --} -- --func (r *OffsetRequest) key() int16 { -- return 2 --} -- --func (r *OffsetRequest) version() int16 { -- return r.Version --} -- --func (r *OffsetRequest) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_10_1_0 -- default: -- return minVersion -- } --} -- --func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { -- if r.blocks == nil { -- r.blocks = make(map[string]map[int32]*offsetRequestBlock) -- } -- -- if r.blocks[topic] == nil { -- r.blocks[topic] = make(map[int32]*offsetRequestBlock) -- } -- -- tmp := new(offsetRequestBlock) -- tmp.time = time -- if r.Version == 0 { -- tmp.maxOffsets = maxOffsets -- } -- -- r.blocks[topic][partitionID] = tmp --} -diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go -deleted file mode 100644 -index 9a9cfe96f3bae..0000000000000 ---- a/vendor/github.com/Shopify/sarama/offset_response.go -+++ /dev/null -@@ -1,174 +0,0 @@ --package sarama -- --type OffsetResponseBlock struct { -- Err KError -- Offsets []int64 // Version 0 -- Offset int64 // Version 1 -- Timestamp int64 // Version 1 --} -- --func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- b.Err = KError(tmp) -- -- if version == 0 { -- b.Offsets, err = pd.getInt64Array() -- -- return err -- } -- -- b.Timestamp, err = pd.getInt64() -- if err != nil { -- return err -- } -- -- b.Offset, err = pd.getInt64() -- if err != nil { -- return err -- } -- -- // For backwards compatibility put the offset in the offsets array too -- b.Offsets = []int64{b.Offset} -- -- return nil --} -- --func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { -- pe.putInt16(int16(b.Err)) -- -- if version == 0 { -- return pe.putInt64Array(b.Offsets) -- } -- -- pe.putInt64(b.Timestamp) -- pe.putInt64(b.Offset) -- -- return nil --} -- --type OffsetResponse struct { -- Version int16 -- Blocks map[string]map[int32]*OffsetResponseBlock --} -- --func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { -- numTopics, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) -- for i := 0; i < numTopics; i++ { -- name, err := pd.getString() -- if err != nil { -- return err -- } -- -- numBlocks, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) -- -- for j := 0; j < numBlocks; j++ { -- id, err := 
pd.getInt32() -- if err != nil { -- return err -- } -- -- block := new(OffsetResponseBlock) -- err = block.decode(pd, version) -- if err != nil { -- return err -- } -- r.Blocks[name][id] = block -- } -- } -- -- return nil --} -- --func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { -- if r.Blocks == nil { -- return nil -- } -- -- if r.Blocks[topic] == nil { -- return nil -- } -- -- return r.Blocks[topic][partition] --} -- --/* --// [0 0 0 1 ntopics --0 8 109 121 95 116 111 112 105 99 topic --0 0 0 1 npartitions --0 0 0 0 id --0 0 -- --0 0 0 1 0 0 0 0 --0 1 1 1 0 0 0 1 --0 8 109 121 95 116 111 112 --105 99 0 0 0 1 0 0 --0 0 0 0 0 0 0 1 --0 0 0 0 0 1 1 1] -- --*/ --func (r *OffsetResponse) encode(pe packetEncoder) (err error) { -- if err = pe.putArrayLength(len(r.Blocks)); err != nil { -- return err -- } -- -- for topic, partitions := range r.Blocks { -- if err = pe.putString(topic); err != nil { -- return err -- } -- if err = pe.putArrayLength(len(partitions)); err != nil { -- return err -- } -- for partition, block := range partitions { -- pe.putInt32(partition) -- if err = block.encode(pe, r.version()); err != nil { -- return err -- } -- } -- } -- -- return nil --} -- --func (r *OffsetResponse) key() int16 { -- return 2 --} -- --func (r *OffsetResponse) version() int16 { -- return r.Version --} -- --func (r *OffsetResponse) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_10_1_0 -- default: -- return minVersion -- } --} -- --// testing API -- --func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { -- if r.Blocks == nil { -- r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) -- } -- byTopic, ok := r.Blocks[topic] -- if !ok { -- byTopic = make(map[int32]*OffsetResponseBlock) -- r.Blocks[topic] = byTopic -- } -- byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} --} -diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go -deleted file mode 100644 -index 74805ccbf53f7..0000000000000 ---- a/vendor/github.com/Shopify/sarama/packet_decoder.go -+++ /dev/null -@@ -1,60 +0,0 @@ --package sarama -- --// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. --// Types implementing Decoder only need to worry about calling methods like GetString, --// not about how a string is represented in Kafka. --type packetDecoder interface { -- // Primitives -- getInt8() (int8, error) -- getInt16() (int16, error) -- getInt32() (int32, error) -- getInt64() (int64, error) -- getVarint() (int64, error) -- getArrayLength() (int, error) -- getBool() (bool, error) -- -- // Collections -- getBytes() ([]byte, error) -- getVarintBytes() ([]byte, error) -- getRawBytes(length int) ([]byte, error) -- getString() (string, error) -- getNullableString() (*string, error) -- getInt32Array() ([]int32, error) -- getInt64Array() ([]int64, error) -- getStringArray() ([]string, error) -- -- // Subsets -- remaining() int -- getSubset(length int) (packetDecoder, error) -- peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset -- -- // Stacks, see PushDecoder -- push(in pushDecoder) error -- pop() error --} -- --// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity --// of the field depends on what is after it in the packet. 
Start them with PacketDecoder.Push() where --// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they --// depend upon have been decoded. --type pushDecoder interface { -- // Saves the offset into the input buffer as the location to actually read the calculated value when able. -- saveOffset(in int) -- -- // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). -- reserveLength() int -- -- // Indicates that all required data is now available to calculate and check the field. -- // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes -- // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. -- check(curOffset int, buf []byte) error --} -- --// dynamicPushDecoder extends the interface of pushDecoder for uses cases where the length of the --// fields itself is unknown until its value was decoded (for instance varint encoded length --// fields). --// During push, dynamicPushDecoder.decode() method will be called instead of reserveLength() --type dynamicPushDecoder interface { -- pushDecoder -- decoder --} -diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go -deleted file mode 100644 -index 67b8daed829c2..0000000000000 ---- a/vendor/github.com/Shopify/sarama/packet_encoder.go -+++ /dev/null -@@ -1,65 +0,0 @@ --package sarama -- --import ""github.com/rcrowley/go-metrics"" -- --// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. --// Types implementing Encoder only need to worry about calling methods like PutString, --// not about how a string is represented in Kafka. --type packetEncoder interface { -- // Primitives -- putInt8(in int8) -- putInt16(in int16) -- putInt32(in int32) -- putInt64(in int64) -- putVarint(in int64) -- putArrayLength(in int) error -- putBool(in bool) -- -- // Collections -- putBytes(in []byte) error -- putVarintBytes(in []byte) error -- putRawBytes(in []byte) error -- putString(in string) error -- putNullableString(in *string) error -- putStringArray(in []string) error -- putInt32Array(in []int32) error -- putInt64Array(in []int64) error -- -- // Provide the current offset to record the batch size metric -- offset() int -- -- // Stacks, see PushEncoder -- push(in pushEncoder) -- pop() error -- -- // To record metrics when provided -- metricRegistry() metrics.Registry --} -- --// PushEncoder is the interface for encoding fields like CRCs and lengths where the value --// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where --// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they --// depend upon have been written. --type pushEncoder interface { -- // Saves the offset into the input buffer as the location to actually write the calculated value when able. -- saveOffset(in int) -- -- // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). -- reserveLength() int -- -- // Indicates that all required data is now available to calculate and write the field. -- // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes -- // of data to the saved offset, based on the data between the saved offset and curOffset. 
-- run(curOffset int, buf []byte) error --} -- --// dynamicPushEncoder extends the interface of pushEncoder for uses cases where the length of the --// fields itself is unknown until its value was computed (for instance varint encoded length --// fields). --type dynamicPushEncoder interface { -- pushEncoder -- -- // Called during pop() to adjust the length of the field. -- // It should return the difference in bytes between the last computed length and current length. -- adjustLength(currOffset int) int --} -diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go -deleted file mode 100644 -index 972932728a54b..0000000000000 ---- a/vendor/github.com/Shopify/sarama/partitioner.go -+++ /dev/null -@@ -1,135 +0,0 @@ --package sarama -- --import ( -- ""hash"" -- ""hash/fnv"" -- ""math/rand"" -- ""time"" --) -- --// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], --// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided --// as simple default implementations. --type Partitioner interface { -- // Partition takes a message and partition count and chooses a partition -- Partition(message *ProducerMessage, numPartitions int32) (int32, error) -- -- // RequiresConsistency indicates to the user of the partitioner whether the -- // mapping of key->partition is consistent or not. Specifically, if a -- // partitioner requires consistency then it must be allowed to choose from all -- // partitions (even ones known to be unavailable), and its choice must be -- // respected by the caller. The obvious example is the HashPartitioner. -- RequiresConsistency() bool --} -- --// PartitionerConstructor is the type for a function capable of constructing new Partitioners. --type PartitionerConstructor func(topic string) Partitioner -- --type manualPartitioner struct{} -- --// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided --// ProducerMessage's Partition field as the partition to produce to. --func NewManualPartitioner(topic string) Partitioner { -- return new(manualPartitioner) --} -- --func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { -- return message.Partition, nil --} -- --func (p *manualPartitioner) RequiresConsistency() bool { -- return true --} -- --type randomPartitioner struct { -- generator *rand.Rand --} -- --// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. --func NewRandomPartitioner(topic string) Partitioner { -- p := new(randomPartitioner) -- p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) -- return p --} -- --func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { -- return int32(p.generator.Intn(int(numPartitions))), nil --} -- --func (p *randomPartitioner) RequiresConsistency() bool { -- return false --} -- --type roundRobinPartitioner struct { -- partition int32 --} -- --// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. 
--func NewRoundRobinPartitioner(topic string) Partitioner { -- return &roundRobinPartitioner{} --} -- --func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { -- if p.partition >= numPartitions { -- p.partition = 0 -- } -- ret := p.partition -- p.partition++ -- return ret, nil --} -- --func (p *roundRobinPartitioner) RequiresConsistency() bool { -- return false --} -- --type hashPartitioner struct { -- random Partitioner -- hasher hash.Hash32 --} -- --// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. --// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that --// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. --func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { -- return func(topic string) Partitioner { -- p := new(hashPartitioner) -- p.random = NewRandomPartitioner(topic) -- p.hasher = hasher() -- return p -- } --} -- --// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a --// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, --// modulus the number of partitions. This ensures that messages with the same key always end up on the --// same partition. --func NewHashPartitioner(topic string) Partitioner { -- p := new(hashPartitioner) -- p.random = NewRandomPartitioner(topic) -- p.hasher = fnv.New32a() -- return p --} -- --func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { -- if message.Key == nil { -- return p.random.Partition(message, numPartitions) -- } -- bytes, err := message.Key.Encode() -- if err != nil { -- return -1, err -- } -- p.hasher.Reset() -- _, err = p.hasher.Write(bytes) -- if err != nil { -- return -1, err -- } -- partition := int32(p.hasher.Sum32()) % numPartitions -- if partition < 0 { -- partition = -partition -- } -- return partition, nil --} -- --func (p *hashPartitioner) RequiresConsistency() bool { -- return true --} -diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go -deleted file mode 100644 -index b633cd151113c..0000000000000 ---- a/vendor/github.com/Shopify/sarama/prep_encoder.go -+++ /dev/null -@@ -1,153 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""math"" -- -- ""github.com/rcrowley/go-metrics"" --) -- --type prepEncoder struct { -- stack []pushEncoder -- length int --} -- --// primitives -- --func (pe *prepEncoder) putInt8(in int8) { -- pe.length++ --} -- --func (pe *prepEncoder) putInt16(in int16) { -- pe.length += 2 --} -- --func (pe *prepEncoder) putInt32(in int32) { -- pe.length += 4 --} -- --func (pe *prepEncoder) putInt64(in int64) { -- pe.length += 8 --} -- --func (pe *prepEncoder) putVarint(in int64) { -- var buf [binary.MaxVarintLen64]byte -- pe.length += binary.PutVarint(buf[:], in) --} -- --func (pe *prepEncoder) putArrayLength(in int) error { -- if in > math.MaxInt32 { -- return PacketEncodingError{fmt.Sprintf(""array too long (%d)"", in)} -- } -- pe.length += 4 -- return nil --} -- --func (pe *prepEncoder) putBool(in bool) { -- pe.length++ --} -- --// arrays -- --func (pe *prepEncoder) putBytes(in []byte) error { -- pe.length += 4 -- if in == nil { -- return nil -- } -- return pe.putRawBytes(in) --} -- --func (pe *prepEncoder) putVarintBytes(in []byte) error { -- 
if in == nil { -- pe.putVarint(-1) -- return nil -- } -- pe.putVarint(int64(len(in))) -- return pe.putRawBytes(in) --} -- --func (pe *prepEncoder) putRawBytes(in []byte) error { -- if len(in) > math.MaxInt32 { -- return PacketEncodingError{fmt.Sprintf(""byteslice too long (%d)"", len(in))} -- } -- pe.length += len(in) -- return nil --} -- --func (pe *prepEncoder) putNullableString(in *string) error { -- if in == nil { -- pe.length += 2 -- return nil -- } -- return pe.putString(*in) --} -- --func (pe *prepEncoder) putString(in string) error { -- pe.length += 2 -- if len(in) > math.MaxInt16 { -- return PacketEncodingError{fmt.Sprintf(""string too long (%d)"", len(in))} -- } -- pe.length += len(in) -- return nil --} -- --func (pe *prepEncoder) putStringArray(in []string) error { -- err := pe.putArrayLength(len(in)) -- if err != nil { -- return err -- } -- -- for _, str := range in { -- if err := pe.putString(str); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (pe *prepEncoder) putInt32Array(in []int32) error { -- err := pe.putArrayLength(len(in)) -- if err != nil { -- return err -- } -- pe.length += 4 * len(in) -- return nil --} -- --func (pe *prepEncoder) putInt64Array(in []int64) error { -- err := pe.putArrayLength(len(in)) -- if err != nil { -- return err -- } -- pe.length += 8 * len(in) -- return nil --} -- --func (pe *prepEncoder) offset() int { -- return pe.length --} -- --// stackable -- --func (pe *prepEncoder) push(in pushEncoder) { -- in.saveOffset(pe.length) -- pe.length += in.reserveLength() -- pe.stack = append(pe.stack, in) --} -- --func (pe *prepEncoder) pop() error { -- in := pe.stack[len(pe.stack)-1] -- pe.stack = pe.stack[:len(pe.stack)-1] -- if dpe, ok := in.(dynamicPushEncoder); ok { -- pe.length += dpe.adjustLength(pe.length) -- } -- -- return nil --} -- --// we do not record metrics during the prep encoder pass --func (pe *prepEncoder) metricRegistry() metrics.Registry { -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go -deleted file mode 100644 -index 0ec4d8d53f979..0000000000000 ---- a/vendor/github.com/Shopify/sarama/produce_request.go -+++ /dev/null -@@ -1,252 +0,0 @@ --package sarama -- --import ""github.com/rcrowley/go-metrics"" -- --// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements --// it must see before responding. Any of the constants defined here are valid. On broker versions --// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many --// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced --// by setting the `min.isr` value in the brokers configuration). --type RequiredAcks int16 -- --const ( -- // NoResponse doesn't send any response, the TCP ACK is all you get. -- NoResponse RequiredAcks = 0 -- // WaitForLocal waits for only the local commit to succeed before responding. -- WaitForLocal RequiredAcks = 1 -- // WaitForAll waits for all in-sync replicas to commit before responding. -- // The minimum number of in-sync replicas is configured on the broker via -- // the `min.insync.replicas` configuration key. 
-- WaitForAll RequiredAcks = -1 --) -- --type ProduceRequest struct { -- TransactionalID *string -- RequiredAcks RequiredAcks -- Timeout int32 -- Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 -- records map[string]map[int32]Records --} -- --func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, -- topicCompressionRatioMetric metrics.Histogram) int64 { -- var topicRecordCount int64 -- for _, messageBlock := range msgSet.Messages { -- // Is this a fake ""message"" wrapping real messages? -- if messageBlock.Msg.Set != nil { -- topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) -- } else { -- // A single uncompressed message -- topicRecordCount++ -- } -- // Better be safe than sorry when computing the compression ratio -- if messageBlock.Msg.compressedSize != 0 { -- compressionRatio := float64(len(messageBlock.Msg.Value)) / -- float64(messageBlock.Msg.compressedSize) -- // Histogram do not support decimal values, let's multiple it by 100 for better precision -- intCompressionRatio := int64(100 * compressionRatio) -- compressionRatioMetric.Update(intCompressionRatio) -- topicCompressionRatioMetric.Update(intCompressionRatio) -- } -- } -- return topicRecordCount --} -- --func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, -- topicCompressionRatioMetric metrics.Histogram) int64 { -- if recordBatch.compressedRecords != nil { -- compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) -- compressionRatioMetric.Update(compressionRatio) -- topicCompressionRatioMetric.Update(compressionRatio) -- } -- -- return int64(len(recordBatch.Records)) --} -- --func (r *ProduceRequest) encode(pe packetEncoder) error { -- if r.Version >= 3 { -- if err := pe.putNullableString(r.TransactionalID); err != nil { -- return err -- } -- } -- pe.putInt16(int16(r.RequiredAcks)) -- pe.putInt32(r.Timeout) -- metricRegistry := pe.metricRegistry() -- var batchSizeMetric metrics.Histogram -- var compressionRatioMetric metrics.Histogram -- if metricRegistry != nil { -- batchSizeMetric = getOrRegisterHistogram(""batch-size"", metricRegistry) -- compressionRatioMetric = getOrRegisterHistogram(""compression-ratio"", metricRegistry) -- } -- totalRecordCount := int64(0) -- -- err := pe.putArrayLength(len(r.records)) -- if err != nil { -- return err -- } -- -- for topic, partitions := range r.records { -- err = pe.putString(topic) -- if err != nil { -- return err -- } -- err = pe.putArrayLength(len(partitions)) -- if err != nil { -- return err -- } -- topicRecordCount := int64(0) -- var topicCompressionRatioMetric metrics.Histogram -- if metricRegistry != nil { -- topicCompressionRatioMetric = getOrRegisterTopicHistogram(""compression-ratio"", topic, metricRegistry) -- } -- for id, records := range partitions { -- startOffset := pe.offset() -- pe.putInt32(id) -- pe.push(&lengthField{}) -- err = records.encode(pe) -- if err != nil { -- return err -- } -- err = pe.pop() -- if err != nil { -- return err -- } -- if metricRegistry != nil { -- if r.Version >= 3 { -- topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric) -- } else { -- topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric) -- } -- batchSize := int64(pe.offset() - startOffset) -- batchSizeMetric.Update(batchSize) -- getOrRegisterTopicHistogram(""batch-size"", topic, 
metricRegistry).Update(batchSize) -- } -- } -- if topicRecordCount > 0 { -- getOrRegisterTopicMeter(""record-send-rate"", topic, metricRegistry).Mark(topicRecordCount) -- getOrRegisterTopicHistogram(""records-per-request"", topic, metricRegistry).Update(topicRecordCount) -- totalRecordCount += topicRecordCount -- } -- } -- if totalRecordCount > 0 { -- metrics.GetOrRegisterMeter(""record-send-rate"", metricRegistry).Mark(totalRecordCount) -- getOrRegisterHistogram(""records-per-request"", metricRegistry).Update(totalRecordCount) -- } -- -- return nil --} -- --func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { -- r.Version = version -- -- if version >= 3 { -- id, err := pd.getNullableString() -- if err != nil { -- return err -- } -- r.TransactionalID = id -- } -- requiredAcks, err := pd.getInt16() -- if err != nil { -- return err -- } -- r.RequiredAcks = RequiredAcks(requiredAcks) -- if r.Timeout, err = pd.getInt32(); err != nil { -- return err -- } -- topicCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if topicCount == 0 { -- return nil -- } -- -- r.records = make(map[string]map[int32]Records) -- for i := 0; i < topicCount; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- partitionCount, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- r.records[topic] = make(map[int32]Records) -- -- for j := 0; j < partitionCount; j++ { -- partition, err := pd.getInt32() -- if err != nil { -- return err -- } -- size, err := pd.getInt32() -- if err != nil { -- return err -- } -- recordsDecoder, err := pd.getSubset(int(size)) -- if err != nil { -- return err -- } -- var records Records -- if err := records.decode(recordsDecoder); err != nil { -- return err -- } -- r.records[topic][partition] = records -- } -- } -- -- return nil --} -- --func (r *ProduceRequest) key() int16 { -- return 0 --} -- --func (r *ProduceRequest) version() int16 { -- return r.Version --} -- --func (r *ProduceRequest) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_9_0_0 -- case 2: -- return V0_10_0_0 -- case 3: -- return V0_11_0_0 -- default: -- return minVersion -- } --} -- --func (r *ProduceRequest) ensureRecords(topic string, partition int32) { -- if r.records == nil { -- r.records = make(map[string]map[int32]Records) -- } -- -- if r.records[topic] == nil { -- r.records[topic] = make(map[int32]Records) -- } --} -- --func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { -- r.ensureRecords(topic, partition) -- set := r.records[topic][partition].msgSet -- -- if set == nil { -- set = new(MessageSet) -- r.records[topic][partition] = newLegacyRecords(set) -- } -- -- set.addMessage(msg) --} -- --func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { -- r.ensureRecords(topic, partition) -- r.records[topic][partition] = newLegacyRecords(set) --} -- --func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { -- r.ensureRecords(topic, partition) -- r.records[topic][partition] = newDefaultRecords(batch) --} -diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go -deleted file mode 100644 -index 043c40f877230..0000000000000 ---- a/vendor/github.com/Shopify/sarama/produce_response.go -+++ /dev/null -@@ -1,183 +0,0 @@ --package sarama -- --import ( -- ""fmt"" -- ""time"" --) -- --type ProduceResponseBlock struct { -- Err KError -- Offset int64 -- // only provided if 
Version >= 2 and the broker is configured with `LogAppendTime` -- Timestamp time.Time --} -- --func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { -- tmp, err := pd.getInt16() -- if err != nil { -- return err -- } -- b.Err = KError(tmp) -- -- b.Offset, err = pd.getInt64() -- if err != nil { -- return err -- } -- -- if version >= 2 { -- if millis, err := pd.getInt64(); err != nil { -- return err -- } else if millis != -1 { -- b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) -- } -- } -- -- return nil --} -- --func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { -- pe.putInt16(int16(b.Err)) -- pe.putInt64(b.Offset) -- -- if version >= 2 { -- timestamp := int64(-1) -- if !b.Timestamp.Before(time.Unix(0, 0)) { -- timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) -- } else if !b.Timestamp.IsZero() { -- return PacketEncodingError{fmt.Sprintf(""invalid timestamp (%v)"", b.Timestamp)} -- } -- pe.putInt64(timestamp) -- } -- -- return nil --} -- --type ProduceResponse struct { -- Blocks map[string]map[int32]*ProduceResponseBlock -- Version int16 -- ThrottleTime time.Duration // only provided if Version >= 1 --} -- --func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { -- r.Version = version -- -- numTopics, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) -- for i := 0; i < numTopics; i++ { -- name, err := pd.getString() -- if err != nil { -- return err -- } -- -- numBlocks, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) -- -- for j := 0; j < numBlocks; j++ { -- id, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- block := new(ProduceResponseBlock) -- err = block.decode(pd, version) -- if err != nil { -- return err -- } -- r.Blocks[name][id] = block -- } -- } -- -- if r.Version >= 1 { -- millis, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- r.ThrottleTime = time.Duration(millis) * time.Millisecond -- } -- -- return nil --} -- --func (r *ProduceResponse) encode(pe packetEncoder) error { -- err := pe.putArrayLength(len(r.Blocks)) -- if err != nil { -- return err -- } -- for topic, partitions := range r.Blocks { -- err = pe.putString(topic) -- if err != nil { -- return err -- } -- err = pe.putArrayLength(len(partitions)) -- if err != nil { -- return err -- } -- for id, prb := range partitions { -- pe.putInt32(id) -- err = prb.encode(pe, r.Version) -- if err != nil { -- return err -- } -- } -- } -- if r.Version >= 1 { -- pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) -- } -- return nil --} -- --func (r *ProduceResponse) key() int16 { -- return 0 --} -- --func (r *ProduceResponse) version() int16 { -- return r.Version --} -- --func (r *ProduceResponse) requiredVersion() KafkaVersion { -- switch r.Version { -- case 1: -- return V0_9_0_0 -- case 2: -- return V0_10_0_0 -- case 3: -- return V0_11_0_0 -- default: -- return minVersion -- } --} -- --func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { -- if r.Blocks == nil { -- return nil -- } -- -- if r.Blocks[topic] == nil { -- return nil -- } -- -- return r.Blocks[topic][partition] --} -- --// Testing API -- --func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { -- if r.Blocks == nil { -- r.Blocks = 
make(map[string]map[int32]*ProduceResponseBlock) -- } -- byTopic, ok := r.Blocks[topic] -- if !ok { -- byTopic = make(map[int32]*ProduceResponseBlock) -- r.Blocks[topic] = byTopic -- } -- byTopic[partition] = &ProduceResponseBlock{Err: err} --} -diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go -deleted file mode 100644 -index 3cbaeb5f90ea3..0000000000000 ---- a/vendor/github.com/Shopify/sarama/produce_set.go -+++ /dev/null -@@ -1,243 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- ""time"" --) -- --type partitionSet struct { -- msgs []*ProducerMessage -- recordsToSend Records -- bufferBytes int --} -- --type produceSet struct { -- parent *asyncProducer -- msgs map[string]map[int32]*partitionSet -- -- bufferBytes int -- bufferCount int --} -- --func newProduceSet(parent *asyncProducer) *produceSet { -- return &produceSet{ -- msgs: make(map[string]map[int32]*partitionSet), -- parent: parent, -- } --} -- --func (ps *produceSet) add(msg *ProducerMessage) error { -- var err error -- var key, val []byte -- -- if msg.Key != nil { -- if key, err = msg.Key.Encode(); err != nil { -- return err -- } -- } -- -- if msg.Value != nil { -- if val, err = msg.Value.Encode(); err != nil { -- return err -- } -- } -- -- timestamp := msg.Timestamp -- if msg.Timestamp.IsZero() { -- timestamp = time.Now() -- } -- -- partitions := ps.msgs[msg.Topic] -- if partitions == nil { -- partitions = make(map[int32]*partitionSet) -- ps.msgs[msg.Topic] = partitions -- } -- -- var size int -- -- set := partitions[msg.Partition] -- if set == nil { -- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { -- batch := &RecordBatch{ -- FirstTimestamp: timestamp, -- Version: 2, -- ProducerID: -1, /* No producer id */ -- Codec: ps.parent.conf.Producer.Compression, -- } -- set = &partitionSet{recordsToSend: newDefaultRecords(batch)} -- size = recordBatchOverhead -- } else { -- set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} -- } -- partitions[msg.Partition] = set -- } -- -- set.msgs = append(set.msgs, msg) -- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { -- // We are being conservative here to avoid having to prep encode the record -- size += maximumRecordOverhead -- rec := &Record{ -- Key: key, -- Value: val, -- TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp), -- } -- size += len(key) + len(val) -- if len(msg.Headers) > 0 { -- rec.Headers = make([]*RecordHeader, len(msg.Headers)) -- for i := range msg.Headers { -- rec.Headers[i] = &msg.Headers[i] -- size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 -- } -- } -- set.recordsToSend.recordBatch.addRecord(rec) -- } else { -- msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} -- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { -- msgToSend.Timestamp = timestamp -- msgToSend.Version = 1 -- } -- set.recordsToSend.msgSet.addMessage(msgToSend) -- size = producerMessageOverhead + len(key) + len(val) -- } -- -- set.bufferBytes += size -- ps.bufferBytes += size -- ps.bufferCount++ -- -- return nil --} -- --func (ps *produceSet) buildRequest() *ProduceRequest { -- req := &ProduceRequest{ -- RequiredAcks: ps.parent.conf.Producer.RequiredAcks, -- Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), -- } -- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { -- req.Version = 2 -- } -- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { -- req.Version = 3 -- } -- -- for topic, partitionSet := range ps.msgs { -- for 
partition, set := range partitionSet { -- if req.Version >= 3 { -- rb := set.recordsToSend.recordBatch -- if len(rb.Records) > 0 { -- rb.LastOffsetDelta = int32(len(rb.Records) - 1) -- for i, record := range rb.Records { -- record.OffsetDelta = int64(i) -- } -- } -- -- req.AddBatch(topic, partition, rb) -- continue -- } -- if ps.parent.conf.Producer.Compression == CompressionNone { -- req.AddSet(topic, partition, set.recordsToSend.msgSet) -- } else { -- // When compression is enabled, the entire set for each partition is compressed -- // and sent as the payload of a single fake ""message"" with the appropriate codec -- // set and no key. When the server sees a message with a compression codec, it -- // decompresses the payload and treats the result as its message set. -- -- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { -- // If our version is 0.10 or later, assign relative offsets -- // to the inner messages. This lets the broker avoid -- // recompressing the message set. -- // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets -- // for details on relative offsets.) -- for i, msg := range set.recordsToSend.msgSet.Messages { -- msg.Offset = int64(i) -- } -- } -- payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry) -- if err != nil { -- Logger.Println(err) // if this happens, it's basically our fault. -- panic(err) -- } -- compMsg := &Message{ -- Codec: ps.parent.conf.Producer.Compression, -- Key: nil, -- Value: payload, -- Set: set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics -- } -- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { -- compMsg.Version = 1 -- compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp -- } -- req.AddMessage(topic, partition, compMsg) -- } -- } -- } -- -- return req --} -- --func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { -- for topic, partitionSet := range ps.msgs { -- for partition, set := range partitionSet { -- cb(topic, partition, set.msgs) -- } -- } --} -- --func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { -- if ps.msgs[topic] == nil { -- return nil -- } -- set := ps.msgs[topic][partition] -- if set == nil { -- return nil -- } -- ps.bufferBytes -= set.bufferBytes -- ps.bufferCount -= len(set.msgs) -- delete(ps.msgs[topic], partition) -- return set.msgs --} -- --func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { -- version := 1 -- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { -- version = 2 -- } -- -- switch { -- // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. -- case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): -- return true -- // Would we overflow the size-limit of a compressed message-batch for this partition? -- case ps.parent.conf.Producer.Compression != CompressionNone && -- ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && -- ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: -- return true -- // Would we overflow simply in number of messages? 
-- case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: -- return true -- default: -- return false -- } --} -- --func (ps *produceSet) readyToFlush() bool { -- switch { -- // If we don't have any messages, nothing else matters -- case ps.empty(): -- return false -- // If all three config values are 0, we always flush as-fast-as-possible -- case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: -- return true -- // If we've passed the message trigger-point -- case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: -- return true -- // If we've passed the byte trigger-point -- case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: -- return true -- default: -- return false -- } --} -- --func (ps *produceSet) empty() bool { -- return ps.bufferCount == 0 --} -diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go -deleted file mode 100644 -index 23045e7d33ab5..0000000000000 ---- a/vendor/github.com/Shopify/sarama/real_decoder.go -+++ /dev/null -@@ -1,324 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- ""math"" --) -- --var errInvalidArrayLength = PacketDecodingError{""invalid array length""} --var errInvalidByteSliceLength = PacketDecodingError{""invalid byteslice length""} --var errInvalidByteSliceLengthType = PacketDecodingError{""invalid byteslice length type""} --var errInvalidStringLength = PacketDecodingError{""invalid string length""} --var errInvalidSubsetSize = PacketDecodingError{""invalid subset size""} --var errVarintOverflow = PacketDecodingError{""varint overflow""} --var errInvalidBool = PacketDecodingError{""invalid bool""} -- --type realDecoder struct { -- raw []byte -- off int -- stack []pushDecoder --} -- --// primitives -- --func (rd *realDecoder) getInt8() (int8, error) { -- if rd.remaining() < 1 { -- rd.off = len(rd.raw) -- return -1, ErrInsufficientData -- } -- tmp := int8(rd.raw[rd.off]) -- rd.off++ -- return tmp, nil --} -- --func (rd *realDecoder) getInt16() (int16, error) { -- if rd.remaining() < 2 { -- rd.off = len(rd.raw) -- return -1, ErrInsufficientData -- } -- tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) -- rd.off += 2 -- return tmp, nil --} -- --func (rd *realDecoder) getInt32() (int32, error) { -- if rd.remaining() < 4 { -- rd.off = len(rd.raw) -- return -1, ErrInsufficientData -- } -- tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) -- rd.off += 4 -- return tmp, nil --} -- --func (rd *realDecoder) getInt64() (int64, error) { -- if rd.remaining() < 8 { -- rd.off = len(rd.raw) -- return -1, ErrInsufficientData -- } -- tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) -- rd.off += 8 -- return tmp, nil --} -- --func (rd *realDecoder) getVarint() (int64, error) { -- tmp, n := binary.Varint(rd.raw[rd.off:]) -- if n == 0 { -- rd.off = len(rd.raw) -- return -1, ErrInsufficientData -- } -- if n < 0 { -- rd.off -= n -- return -1, errVarintOverflow -- } -- rd.off += n -- return tmp, nil --} -- --func (rd *realDecoder) getArrayLength() (int, error) { -- if rd.remaining() < 4 { -- rd.off = len(rd.raw) -- return -1, ErrInsufficientData -- } -- tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) -- rd.off += 4 -- if tmp > rd.remaining() { -- rd.off = len(rd.raw) -- return -1, ErrInsufficientData -- } else if tmp > 
2*math.MaxUint16 { -- return -1, errInvalidArrayLength -- } -- return tmp, nil --} -- --func (rd *realDecoder) getBool() (bool, error) { -- b, err := rd.getInt8() -- if err != nil || b == 0 { -- return false, err -- } -- if b != 1 { -- return false, errInvalidBool -- } -- return true, nil --} -- --// collections -- --func (rd *realDecoder) getBytes() ([]byte, error) { -- tmp, err := rd.getInt32() -- if err != nil { -- return nil, err -- } -- if tmp == -1 { -- return nil, nil -- } -- -- return rd.getRawBytes(int(tmp)) --} -- --func (rd *realDecoder) getVarintBytes() ([]byte, error) { -- tmp, err := rd.getVarint() -- if err != nil { -- return nil, err -- } -- if tmp == -1 { -- return nil, nil -- } -- -- return rd.getRawBytes(int(tmp)) --} -- --func (rd *realDecoder) getStringLength() (int, error) { -- length, err := rd.getInt16() -- if err != nil { -- return 0, err -- } -- -- n := int(length) -- -- switch { -- case n < -1: -- return 0, errInvalidStringLength -- case n > rd.remaining(): -- rd.off = len(rd.raw) -- return 0, ErrInsufficientData -- } -- -- return n, nil --} -- --func (rd *realDecoder) getString() (string, error) { -- n, err := rd.getStringLength() -- if err != nil || n == -1 { -- return """", err -- } -- -- tmpStr := string(rd.raw[rd.off : rd.off+n]) -- rd.off += n -- return tmpStr, nil --} -- --func (rd *realDecoder) getNullableString() (*string, error) { -- n, err := rd.getStringLength() -- if err != nil || n == -1 { -- return nil, err -- } -- -- tmpStr := string(rd.raw[rd.off : rd.off+n]) -- rd.off += n -- return &tmpStr, err --} -- --func (rd *realDecoder) getInt32Array() ([]int32, error) { -- if rd.remaining() < 4 { -- rd.off = len(rd.raw) -- return nil, ErrInsufficientData -- } -- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) -- rd.off += 4 -- -- if rd.remaining() < 4*n { -- rd.off = len(rd.raw) -- return nil, ErrInsufficientData -- } -- -- if n == 0 { -- return nil, nil -- } -- -- if n < 0 { -- return nil, errInvalidArrayLength -- } -- -- ret := make([]int32, n) -- for i := range ret { -- ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) -- rd.off += 4 -- } -- return ret, nil --} -- --func (rd *realDecoder) getInt64Array() ([]int64, error) { -- if rd.remaining() < 4 { -- rd.off = len(rd.raw) -- return nil, ErrInsufficientData -- } -- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) -- rd.off += 4 -- -- if rd.remaining() < 8*n { -- rd.off = len(rd.raw) -- return nil, ErrInsufficientData -- } -- -- if n == 0 { -- return nil, nil -- } -- -- if n < 0 { -- return nil, errInvalidArrayLength -- } -- -- ret := make([]int64, n) -- for i := range ret { -- ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) -- rd.off += 8 -- } -- return ret, nil --} -- --func (rd *realDecoder) getStringArray() ([]string, error) { -- if rd.remaining() < 4 { -- rd.off = len(rd.raw) -- return nil, ErrInsufficientData -- } -- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) -- rd.off += 4 -- -- if n == 0 { -- return nil, nil -- } -- -- if n < 0 { -- return nil, errInvalidArrayLength -- } -- -- ret := make([]string, n) -- for i := range ret { -- str, err := rd.getString() -- if err != nil { -- return nil, err -- } -- -- ret[i] = str -- } -- return ret, nil --} -- --// subsets -- --func (rd *realDecoder) remaining() int { -- return len(rd.raw) - rd.off --} -- --func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { -- buf, err := rd.getRawBytes(length) -- if err != nil { -- return nil, err -- } -- return &realDecoder{raw: buf}, nil --} -- --func (rd *realDecoder) 
getRawBytes(length int) ([]byte, error) { -- if length < 0 { -- return nil, errInvalidByteSliceLength -- } else if length > rd.remaining() { -- rd.off = len(rd.raw) -- return nil, ErrInsufficientData -- } -- -- start := rd.off -- rd.off += length -- return rd.raw[start:rd.off], nil --} -- --func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { -- if rd.remaining() < offset+length { -- return nil, ErrInsufficientData -- } -- off := rd.off + offset -- return &realDecoder{raw: rd.raw[off : off+length]}, nil --} -- --// stacks -- --func (rd *realDecoder) push(in pushDecoder) error { -- in.saveOffset(rd.off) -- -- var reserve int -- if dpd, ok := in.(dynamicPushDecoder); ok { -- if err := dpd.decode(rd); err != nil { -- return err -- } -- } else { -- reserve = in.reserveLength() -- if rd.remaining() < reserve { -- rd.off = len(rd.raw) -- return ErrInsufficientData -- } -- } -- -- rd.stack = append(rd.stack, in) -- -- rd.off += reserve -- -- return nil --} -- --func (rd *realDecoder) pop() error { -- // this is go's ugly pop pattern (the inverse of append) -- in := rd.stack[len(rd.stack)-1] -- rd.stack = rd.stack[:len(rd.stack)-1] -- -- return in.check(rd.off, rd.raw) --} -diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go -deleted file mode 100644 -index 3c75387f779b7..0000000000000 ---- a/vendor/github.com/Shopify/sarama/real_encoder.go -+++ /dev/null -@@ -1,156 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- -- ""github.com/rcrowley/go-metrics"" --) -- --type realEncoder struct { -- raw []byte -- off int -- stack []pushEncoder -- registry metrics.Registry --} -- --// primitives -- --func (re *realEncoder) putInt8(in int8) { -- re.raw[re.off] = byte(in) -- re.off++ --} -- --func (re *realEncoder) putInt16(in int16) { -- binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) -- re.off += 2 --} -- --func (re *realEncoder) putInt32(in int32) { -- binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) -- re.off += 4 --} -- --func (re *realEncoder) putInt64(in int64) { -- binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) -- re.off += 8 --} -- --func (re *realEncoder) putVarint(in int64) { -- re.off += binary.PutVarint(re.raw[re.off:], in) --} -- --func (re *realEncoder) putArrayLength(in int) error { -- re.putInt32(int32(in)) -- return nil --} -- --func (re *realEncoder) putBool(in bool) { -- if in { -- re.putInt8(1) -- return -- } -- re.putInt8(0) --} -- --// collection -- --func (re *realEncoder) putRawBytes(in []byte) error { -- copy(re.raw[re.off:], in) -- re.off += len(in) -- return nil --} -- --func (re *realEncoder) putBytes(in []byte) error { -- if in == nil { -- re.putInt32(-1) -- return nil -- } -- re.putInt32(int32(len(in))) -- return re.putRawBytes(in) --} -- --func (re *realEncoder) putVarintBytes(in []byte) error { -- if in == nil { -- re.putVarint(-1) -- return nil -- } -- re.putVarint(int64(len(in))) -- return re.putRawBytes(in) --} -- --func (re *realEncoder) putString(in string) error { -- re.putInt16(int16(len(in))) -- copy(re.raw[re.off:], in) -- re.off += len(in) -- return nil --} -- --func (re *realEncoder) putNullableString(in *string) error { -- if in == nil { -- re.putInt16(-1) -- return nil -- } -- return re.putString(*in) --} -- --func (re *realEncoder) putStringArray(in []string) error { -- err := re.putArrayLength(len(in)) -- if err != nil { -- return err -- } -- -- for _, val := range in { -- if err := re.putString(val); err != nil { -- return err -- } -- } -- -- 
return nil --} -- --func (re *realEncoder) putInt32Array(in []int32) error { -- err := re.putArrayLength(len(in)) -- if err != nil { -- return err -- } -- for _, val := range in { -- re.putInt32(val) -- } -- return nil --} -- --func (re *realEncoder) putInt64Array(in []int64) error { -- err := re.putArrayLength(len(in)) -- if err != nil { -- return err -- } -- for _, val := range in { -- re.putInt64(val) -- } -- return nil --} -- --func (re *realEncoder) offset() int { -- return re.off --} -- --// stacks -- --func (re *realEncoder) push(in pushEncoder) { -- in.saveOffset(re.off) -- re.off += in.reserveLength() -- re.stack = append(re.stack, in) --} -- --func (re *realEncoder) pop() error { -- // this is go's ugly pop pattern (the inverse of append) -- in := re.stack[len(re.stack)-1] -- re.stack = re.stack[:len(re.stack)-1] -- -- return in.run(re.off, re.raw) --} -- --// we do record metrics during the real encoder pass --func (re *realEncoder) metricRegistry() metrics.Registry { -- return re.registry --} -diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go -deleted file mode 100644 -index cded308cf0fd1..0000000000000 ---- a/vendor/github.com/Shopify/sarama/record.go -+++ /dev/null -@@ -1,113 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- ""time"" --) -- --const ( -- controlMask = 0x20 -- maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 --) -- --type RecordHeader struct { -- Key []byte -- Value []byte --} -- --func (h *RecordHeader) encode(pe packetEncoder) error { -- if err := pe.putVarintBytes(h.Key); err != nil { -- return err -- } -- return pe.putVarintBytes(h.Value) --} -- --func (h *RecordHeader) decode(pd packetDecoder) (err error) { -- if h.Key, err = pd.getVarintBytes(); err != nil { -- return err -- } -- -- if h.Value, err = pd.getVarintBytes(); err != nil { -- return err -- } -- return nil --} -- --type Record struct { -- Attributes int8 -- TimestampDelta time.Duration -- OffsetDelta int64 -- Key []byte -- Value []byte -- Headers []*RecordHeader -- -- length varintLengthField --} -- --func (r *Record) encode(pe packetEncoder) error { -- pe.push(&r.length) -- pe.putInt8(r.Attributes) -- pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) -- pe.putVarint(r.OffsetDelta) -- if err := pe.putVarintBytes(r.Key); err != nil { -- return err -- } -- if err := pe.putVarintBytes(r.Value); err != nil { -- return err -- } -- pe.putVarint(int64(len(r.Headers))) -- -- for _, h := range r.Headers { -- if err := h.encode(pe); err != nil { -- return err -- } -- } -- -- return pe.pop() --} -- --func (r *Record) decode(pd packetDecoder) (err error) { -- if err = pd.push(&r.length); err != nil { -- return err -- } -- -- if r.Attributes, err = pd.getInt8(); err != nil { -- return err -- } -- -- timestamp, err := pd.getVarint() -- if err != nil { -- return err -- } -- r.TimestampDelta = time.Duration(timestamp) * time.Millisecond -- -- if r.OffsetDelta, err = pd.getVarint(); err != nil { -- return err -- } -- -- if r.Key, err = pd.getVarintBytes(); err != nil { -- return err -- } -- -- if r.Value, err = pd.getVarintBytes(); err != nil { -- return err -- } -- -- numHeaders, err := pd.getVarint() -- if err != nil { -- return err -- } -- -- if numHeaders >= 0 { -- r.Headers = make([]*RecordHeader, numHeaders) -- } -- for i := int64(0); i < numHeaders; i++ { -- hdr := new(RecordHeader) -- if err := hdr.decode(pd); err != nil { -- return err -- } -- r.Headers[i] = hdr -- } -- -- return pd.pop() --} -diff --git 
a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go -deleted file mode 100644 -index 321de485b0db4..0000000000000 ---- a/vendor/github.com/Shopify/sarama/record_batch.go -+++ /dev/null -@@ -1,259 +0,0 @@ --package sarama -- --import ( -- ""bytes"" -- ""compress/gzip"" -- ""fmt"" -- ""io/ioutil"" -- ""time"" -- -- ""github.com/eapache/go-xerial-snappy"" -- ""github.com/pierrec/lz4"" --) -- --const recordBatchOverhead = 49 -- --type recordsArray []*Record -- --func (e recordsArray) encode(pe packetEncoder) error { -- for _, r := range e { -- if err := r.encode(pe); err != nil { -- return err -- } -- } -- return nil --} -- --func (e recordsArray) decode(pd packetDecoder) error { -- for i := range e { -- rec := &Record{} -- if err := rec.decode(pd); err != nil { -- return err -- } -- e[i] = rec -- } -- return nil --} -- --type RecordBatch struct { -- FirstOffset int64 -- PartitionLeaderEpoch int32 -- Version int8 -- Codec CompressionCodec -- Control bool -- LastOffsetDelta int32 -- FirstTimestamp time.Time -- MaxTimestamp time.Time -- ProducerID int64 -- ProducerEpoch int16 -- FirstSequence int32 -- Records []*Record -- PartialTrailingRecord bool -- -- compressedRecords []byte -- recordsLen int // uncompressed records size --} -- --func (b *RecordBatch) encode(pe packetEncoder) error { -- if b.Version != 2 { -- return PacketEncodingError{fmt.Sprintf(""unsupported compression codec (%d)"", b.Codec)} -- } -- pe.putInt64(b.FirstOffset) -- pe.push(&lengthField{}) -- pe.putInt32(b.PartitionLeaderEpoch) -- pe.putInt8(b.Version) -- pe.push(newCRC32Field(crcCastagnoli)) -- pe.putInt16(b.computeAttributes()) -- pe.putInt32(b.LastOffsetDelta) -- -- if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { -- return err -- } -- -- if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { -- return err -- } -- -- pe.putInt64(b.ProducerID) -- pe.putInt16(b.ProducerEpoch) -- pe.putInt32(b.FirstSequence) -- -- if err := pe.putArrayLength(len(b.Records)); err != nil { -- return err -- } -- -- if b.compressedRecords == nil { -- if err := b.encodeRecords(pe); err != nil { -- return err -- } -- } -- if err := pe.putRawBytes(b.compressedRecords); err != nil { -- return err -- } -- -- if err := pe.pop(); err != nil { -- return err -- } -- return pe.pop() --} -- --func (b *RecordBatch) decode(pd packetDecoder) (err error) { -- if b.FirstOffset, err = pd.getInt64(); err != nil { -- return err -- } -- -- batchLen, err := pd.getInt32() -- if err != nil { -- return err -- } -- -- if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil { -- return err -- } -- -- if b.Version, err = pd.getInt8(); err != nil { -- return err -- } -- -- if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil { -- return err -- } -- -- attributes, err := pd.getInt16() -- if err != nil { -- return err -- } -- b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask) -- b.Control = attributes&controlMask == controlMask -- -- if b.LastOffsetDelta, err = pd.getInt32(); err != nil { -- return err -- } -- -- if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil { -- return err -- } -- -- if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil { -- return err -- } -- -- if b.ProducerID, err = pd.getInt64(); err != nil { -- return err -- } -- -- if b.ProducerEpoch, err = pd.getInt16(); err != nil { -- return err -- } -- -- if b.FirstSequence, err = pd.getInt32(); err != nil { -- return err -- } -- -- numRecs, err := pd.getArrayLength() -- if err 
!= nil { -- return err -- } -- if numRecs >= 0 { -- b.Records = make([]*Record, numRecs) -- } -- -- bufSize := int(batchLen) - recordBatchOverhead -- recBuffer, err := pd.getRawBytes(bufSize) -- if err != nil { -- if err == ErrInsufficientData { -- b.PartialTrailingRecord = true -- b.Records = nil -- return nil -- } -- return err -- } -- -- if err = pd.pop(); err != nil { -- return err -- } -- -- switch b.Codec { -- case CompressionNone: -- case CompressionGZIP: -- reader, err := gzip.NewReader(bytes.NewReader(recBuffer)) -- if err != nil { -- return err -- } -- if recBuffer, err = ioutil.ReadAll(reader); err != nil { -- return err -- } -- case CompressionSnappy: -- if recBuffer, err = snappy.Decode(recBuffer); err != nil { -- return err -- } -- case CompressionLZ4: -- reader := lz4.NewReader(bytes.NewReader(recBuffer)) -- if recBuffer, err = ioutil.ReadAll(reader); err != nil { -- return err -- } -- default: -- return PacketDecodingError{fmt.Sprintf(""invalid compression specified (%d)"", b.Codec)} -- } -- -- b.recordsLen = len(recBuffer) -- err = decode(recBuffer, recordsArray(b.Records)) -- if err == ErrInsufficientData { -- b.PartialTrailingRecord = true -- b.Records = nil -- return nil -- } -- return err --} -- --func (b *RecordBatch) encodeRecords(pe packetEncoder) error { -- var raw []byte -- var err error -- if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil { -- return err -- } -- b.recordsLen = len(raw) -- -- switch b.Codec { -- case CompressionNone: -- b.compressedRecords = raw -- case CompressionGZIP: -- var buf bytes.Buffer -- writer := gzip.NewWriter(&buf) -- if _, err := writer.Write(raw); err != nil { -- return err -- } -- if err := writer.Close(); err != nil { -- return err -- } -- b.compressedRecords = buf.Bytes() -- case CompressionSnappy: -- b.compressedRecords = snappy.Encode(raw) -- case CompressionLZ4: -- var buf bytes.Buffer -- writer := lz4.NewWriter(&buf) -- if _, err := writer.Write(raw); err != nil { -- return err -- } -- if err := writer.Close(); err != nil { -- return err -- } -- b.compressedRecords = buf.Bytes() -- default: -- return PacketEncodingError{fmt.Sprintf(""unsupported compression codec (%d)"", b.Codec)} -- } -- -- return nil --} -- --func (b *RecordBatch) computeAttributes() int16 { -- attr := int16(b.Codec) & int16(compressionCodecMask) -- if b.Control { -- attr |= controlMask -- } -- return attr --} -- --func (b *RecordBatch) addRecord(r *Record) { -- b.Records = append(b.Records, r) --} -diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go -deleted file mode 100644 -index 258dcbac880aa..0000000000000 ---- a/vendor/github.com/Shopify/sarama/records.go -+++ /dev/null -@@ -1,173 +0,0 @@ --package sarama -- --import ""fmt"" -- --const ( -- unknownRecords = iota -- legacyRecords -- defaultRecords -- -- magicOffset = 16 -- magicLength = 1 --) -- --// Records implements a union type containing either a RecordBatch or a legacy MessageSet. --type Records struct { -- recordsType int -- msgSet *MessageSet -- recordBatch *RecordBatch --} -- --func newLegacyRecords(msgSet *MessageSet) Records { -- return Records{recordsType: legacyRecords, msgSet: msgSet} --} -- --func newDefaultRecords(batch *RecordBatch) Records { -- return Records{recordsType: defaultRecords, recordBatch: batch} --} -- --// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil. --// The first return value indicates whether both fields are nil (and the type is not set). 
--// If both fields are not nil, it returns an error. --func (r *Records) setTypeFromFields() (bool, error) { -- if r.msgSet == nil && r.recordBatch == nil { -- return true, nil -- } -- if r.msgSet != nil && r.recordBatch != nil { -- return false, fmt.Errorf(""both msgSet and recordBatch are set, but record type is unknown"") -- } -- r.recordsType = defaultRecords -- if r.msgSet != nil { -- r.recordsType = legacyRecords -- } -- return false, nil --} -- --func (r *Records) encode(pe packetEncoder) error { -- if r.recordsType == unknownRecords { -- if empty, err := r.setTypeFromFields(); err != nil || empty { -- return err -- } -- } -- -- switch r.recordsType { -- case legacyRecords: -- if r.msgSet == nil { -- return nil -- } -- return r.msgSet.encode(pe) -- case defaultRecords: -- if r.recordBatch == nil { -- return nil -- } -- return r.recordBatch.encode(pe) -- } -- -- return fmt.Errorf(""unknown records type: %v"", r.recordsType) --} -- --func (r *Records) setTypeFromMagic(pd packetDecoder) error { -- magic, err := magicValue(pd) -- if err != nil { -- return err -- } -- -- r.recordsType = defaultRecords -- if magic < 2 { -- r.recordsType = legacyRecords -- } -- -- return nil --} -- --func (r *Records) decode(pd packetDecoder) error { -- if r.recordsType == unknownRecords { -- if err := r.setTypeFromMagic(pd); err != nil { -- return err -- } -- } -- -- switch r.recordsType { -- case legacyRecords: -- r.msgSet = &MessageSet{} -- return r.msgSet.decode(pd) -- case defaultRecords: -- r.recordBatch = &RecordBatch{} -- return r.recordBatch.decode(pd) -- } -- return fmt.Errorf(""unknown records type: %v"", r.recordsType) --} -- --func (r *Records) numRecords() (int, error) { -- if r.recordsType == unknownRecords { -- if empty, err := r.setTypeFromFields(); err != nil || empty { -- return 0, err -- } -- } -- -- switch r.recordsType { -- case legacyRecords: -- if r.msgSet == nil { -- return 0, nil -- } -- return len(r.msgSet.Messages), nil -- case defaultRecords: -- if r.recordBatch == nil { -- return 0, nil -- } -- return len(r.recordBatch.Records), nil -- } -- return 0, fmt.Errorf(""unknown records type: %v"", r.recordsType) --} -- --func (r *Records) isPartial() (bool, error) { -- if r.recordsType == unknownRecords { -- if empty, err := r.setTypeFromFields(); err != nil || empty { -- return false, err -- } -- } -- -- switch r.recordsType { -- case unknownRecords: -- return false, nil -- case legacyRecords: -- if r.msgSet == nil { -- return false, nil -- } -- return r.msgSet.PartialTrailingMessage, nil -- case defaultRecords: -- if r.recordBatch == nil { -- return false, nil -- } -- return r.recordBatch.PartialTrailingRecord, nil -- } -- return false, fmt.Errorf(""unknown records type: %v"", r.recordsType) --} -- --func (r *Records) isControl() (bool, error) { -- if r.recordsType == unknownRecords { -- if empty, err := r.setTypeFromFields(); err != nil || empty { -- return false, err -- } -- } -- -- switch r.recordsType { -- case legacyRecords: -- return false, nil -- case defaultRecords: -- if r.recordBatch == nil { -- return false, nil -- } -- return r.recordBatch.Control, nil -- } -- return false, fmt.Errorf(""unknown records type: %v"", r.recordsType) --} -- --func magicValue(pd packetDecoder) (int8, error) { -- dec, err := pd.peek(magicOffset, magicLength) -- if err != nil { -- return 0, err -- } -- -- return dec.getInt8() --} -diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go -deleted file mode 100644 -index 5f7cb76e95b44..0000000000000 
---- a/vendor/github.com/Shopify/sarama/request.go -+++ /dev/null -@@ -1,145 +0,0 @@ --package sarama -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""io"" --) -- --type protocolBody interface { -- encoder -- versionedDecoder -- key() int16 -- version() int16 -- requiredVersion() KafkaVersion --} -- --type request struct { -- correlationID int32 -- clientID string -- body protocolBody --} -- --func (r *request) encode(pe packetEncoder) (err error) { -- pe.push(&lengthField{}) -- pe.putInt16(r.body.key()) -- pe.putInt16(r.body.version()) -- pe.putInt32(r.correlationID) -- err = pe.putString(r.clientID) -- if err != nil { -- return err -- } -- err = r.body.encode(pe) -- if err != nil { -- return err -- } -- return pe.pop() --} -- --func (r *request) decode(pd packetDecoder) (err error) { -- var key int16 -- if key, err = pd.getInt16(); err != nil { -- return err -- } -- var version int16 -- if version, err = pd.getInt16(); err != nil { -- return err -- } -- if r.correlationID, err = pd.getInt32(); err != nil { -- return err -- } -- r.clientID, err = pd.getString() -- -- r.body = allocateBody(key, version) -- if r.body == nil { -- return PacketDecodingError{fmt.Sprintf(""unknown request key (%d)"", key)} -- } -- return r.body.decode(pd, version) --} -- --func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { -- lengthBytes := make([]byte, 4) -- if _, err := io.ReadFull(r, lengthBytes); err != nil { -- return nil, bytesRead, err -- } -- bytesRead += len(lengthBytes) -- -- length := int32(binary.BigEndian.Uint32(lengthBytes)) -- if length <= 4 || length > MaxRequestSize { -- return nil, bytesRead, PacketDecodingError{fmt.Sprintf(""message of length %d too large or too small"", length)} -- } -- -- encodedReq := make([]byte, length) -- if _, err := io.ReadFull(r, encodedReq); err != nil { -- return nil, bytesRead, err -- } -- bytesRead += len(encodedReq) -- -- req = &request{} -- if err := decode(encodedReq, req); err != nil { -- return nil, bytesRead, err -- } -- return req, bytesRead, nil --} -- --func allocateBody(key, version int16) protocolBody { -- switch key { -- case 0: -- return &ProduceRequest{} -- case 1: -- return &FetchRequest{} -- case 2: -- return &OffsetRequest{Version: version} -- case 3: -- return &MetadataRequest{} -- case 8: -- return &OffsetCommitRequest{Version: version} -- case 9: -- return &OffsetFetchRequest{} -- case 10: -- return &ConsumerMetadataRequest{} -- case 11: -- return &JoinGroupRequest{} -- case 12: -- return &HeartbeatRequest{} -- case 13: -- return &LeaveGroupRequest{} -- case 14: -- return &SyncGroupRequest{} -- case 15: -- return &DescribeGroupsRequest{} -- case 16: -- return &ListGroupsRequest{} -- case 17: -- return &SaslHandshakeRequest{} -- case 18: -- return &ApiVersionsRequest{} -- case 19: -- return &CreateTopicsRequest{} -- case 20: -- return &DeleteTopicsRequest{} -- case 22: -- return &InitProducerIDRequest{} -- case 24: -- return &AddPartitionsToTxnRequest{} -- case 25: -- return &AddOffsetsToTxnRequest{} -- case 26: -- return &EndTxnRequest{} -- case 28: -- return &TxnOffsetCommitRequest{} -- case 29: -- return &DescribeAclsRequest{} -- case 30: -- return &CreateAclsRequest{} -- case 31: -- return &DeleteAclsRequest{} -- case 32: -- return &DescribeConfigsRequest{} -- case 33: -- return &AlterConfigsRequest{} -- case 37: -- return &CreatePartitionsRequest{} -- } -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go -deleted file mode 
100644 -index f3f4d27d6c428..0000000000000 ---- a/vendor/github.com/Shopify/sarama/response_header.go -+++ /dev/null -@@ -1,21 +0,0 @@ --package sarama -- --import ""fmt"" -- --type responseHeader struct { -- length int32 -- correlationID int32 --} -- --func (r *responseHeader) decode(pd packetDecoder) (err error) { -- r.length, err = pd.getInt32() -- if err != nil { -- return err -- } -- if r.length <= 4 || r.length > MaxResponseSize { -- return PacketDecodingError{fmt.Sprintf(""message of length %d too large or too small"", r.length)} -- } -- -- r.correlationID, err = pd.getInt32() -- return err --} -diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go -deleted file mode 100644 -index 7d5dc60d3e282..0000000000000 ---- a/vendor/github.com/Shopify/sarama/sarama.go -+++ /dev/null -@@ -1,99 +0,0 @@ --/* --Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level --API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level --API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. -- --To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel --and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. --The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be --useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees --depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the --SyncProducer can still sometimes be lost. -- --To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic --consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the --https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 --and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. -- --For lower-level needs, the Broker and Request/Response objects permit precise control over each connection --and message sent on the wire; the Client provides higher-level metadata management that is shared between --the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up --exactly with the protocol fields documented by Kafka at --https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol -- --Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. 
-- --Broker related metrics: -- -- +----------------------------------------------+------------+---------------------------------------------------------------+ -- | Name | Type | Description | -- +----------------------------------------------+------------+---------------------------------------------------------------+ -- | incoming-byte-rate | meter | Bytes/second read off all brokers | -- | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | -- | outgoing-byte-rate | meter | Bytes/second written off all brokers | -- | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | -- | request-rate | meter | Requests/second sent to all brokers | -- | request-rate-for-broker- | meter | Requests/second sent to a given broker | -- | request-size | histogram | Distribution of the request size in bytes for all brokers | -- | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | -- | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | -- | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | -- | response-rate | meter | Responses/second received from all brokers | -- | response-rate-for-broker- | meter | Responses/second received from a given broker | -- | response-size | histogram | Distribution of the response size in bytes for all brokers | -- | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | -- +----------------------------------------------+------------+---------------------------------------------------------------+ -- --Note that we do not gather specific metrics for seed brokers but they are part of the ""all brokers"" metrics. -- --Producer related metrics: -- -- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ -- | Name | Type | Description | -- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ -- | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | -- | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | -- | record-send-rate | meter | Records/second sent to all topics | -- | record-send-rate-for-topic- | meter | Records/second sent to a given topic | -- | records-per-request | histogram | Distribution of the number of records sent per request for all topics | -- | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | -- | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | -- | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | -- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ -- --*/ --package sarama -- --import ( -- ""io/ioutil"" -- ""log"" --) -- --// Logger is the instance of a StdLogger interface that Sarama writes connection --// management events to. By default it is set to discard all log messages via ioutil.Discard, --// but you can set it to redirect wherever you want. 
--var Logger StdLogger = log.New(ioutil.Discard, ""[Sarama] "", log.LstdFlags) -- --// StdLogger is used to log error messages. --type StdLogger interface { -- Print(v ...interface{}) -- Printf(format string, v ...interface{}) -- Println(v ...interface{}) --} -- --// PanicHandler is called for recovering from panics spawned internally to the library (and thus --// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. --var PanicHandler func(interface{}) -- --// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying --// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned --// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt --// to process. --var MaxRequestSize int32 = 100 * 1024 * 1024 -- --// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If --// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to --// protect the client from running out of memory. Please note that brokers do not have any natural limit on --// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers --// (see https://issues.apache.org/jira/browse/KAFKA-2063). --var MaxResponseSize int32 = 100 * 1024 * 1024 -diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go -deleted file mode 100644 -index fbbc8947b2ef3..0000000000000 ---- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go -+++ /dev/null -@@ -1,33 +0,0 @@ --package sarama -- --type SaslHandshakeRequest struct { -- Mechanism string --} -- --func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { -- if err := pe.putString(r.Mechanism); err != nil { -- return err -- } -- -- return nil --} -- --func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { -- if r.Mechanism, err = pd.getString(); err != nil { -- return err -- } -- -- return nil --} -- --func (r *SaslHandshakeRequest) key() int16 { -- return 17 --} -- --func (r *SaslHandshakeRequest) version() int16 { -- return 0 --} -- --func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { -- return V0_10_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go -deleted file mode 100644 -index ef290d4bc6da3..0000000000000 ---- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go -+++ /dev/null -@@ -1,38 +0,0 @@ --package sarama -- --type SaslHandshakeResponse struct { -- Err KError -- EnabledMechanisms []string --} -- --func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- return pe.putStringArray(r.EnabledMechanisms) --} -- --func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- -- r.Err = KError(kerr) -- -- if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { -- return err -- } -- -- return nil --} -- --func (r *SaslHandshakeResponse) key() int16 { -- return 17 --} -- --func (r *SaslHandshakeResponse) version() int16 { -- return 0 --} -- --func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { -- return V0_10_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go 
b/vendor/github.com/Shopify/sarama/sync_group_request.go -deleted file mode 100644 -index fe207080e03a3..0000000000000 ---- a/vendor/github.com/Shopify/sarama/sync_group_request.go -+++ /dev/null -@@ -1,100 +0,0 @@ --package sarama -- --type SyncGroupRequest struct { -- GroupId string -- GenerationId int32 -- MemberId string -- GroupAssignments map[string][]byte --} -- --func (r *SyncGroupRequest) encode(pe packetEncoder) error { -- if err := pe.putString(r.GroupId); err != nil { -- return err -- } -- -- pe.putInt32(r.GenerationId) -- -- if err := pe.putString(r.MemberId); err != nil { -- return err -- } -- -- if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { -- return err -- } -- for memberId, memberAssignment := range r.GroupAssignments { -- if err := pe.putString(memberId); err != nil { -- return err -- } -- if err := pe.putBytes(memberAssignment); err != nil { -- return err -- } -- } -- -- return nil --} -- --func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { -- if r.GroupId, err = pd.getString(); err != nil { -- return -- } -- if r.GenerationId, err = pd.getInt32(); err != nil { -- return -- } -- if r.MemberId, err = pd.getString(); err != nil { -- return -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- if n == 0 { -- return nil -- } -- -- r.GroupAssignments = make(map[string][]byte) -- for i := 0; i < n; i++ { -- memberId, err := pd.getString() -- if err != nil { -- return err -- } -- memberAssignment, err := pd.getBytes() -- if err != nil { -- return err -- } -- -- r.GroupAssignments[memberId] = memberAssignment -- } -- -- return nil --} -- --func (r *SyncGroupRequest) key() int16 { -- return 14 --} -- --func (r *SyncGroupRequest) version() int16 { -- return 0 --} -- --func (r *SyncGroupRequest) requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -- --func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { -- if r.GroupAssignments == nil { -- r.GroupAssignments = make(map[string][]byte) -- } -- -- r.GroupAssignments[memberId] = memberAssignment --} -- --func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { -- bin, err := encode(memberAssignment, nil) -- if err != nil { -- return err -- } -- -- r.AddGroupAssignment(memberId, bin) -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go -deleted file mode 100644 -index 194b382b4ab63..0000000000000 ---- a/vendor/github.com/Shopify/sarama/sync_group_response.go -+++ /dev/null -@@ -1,41 +0,0 @@ --package sarama -- --type SyncGroupResponse struct { -- Err KError -- MemberAssignment []byte --} -- --func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { -- assignment := new(ConsumerGroupMemberAssignment) -- err := decode(r.MemberAssignment, assignment) -- return assignment, err --} -- --func (r *SyncGroupResponse) encode(pe packetEncoder) error { -- pe.putInt16(int16(r.Err)) -- return pe.putBytes(r.MemberAssignment) --} -- --func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { -- kerr, err := pd.getInt16() -- if err != nil { -- return err -- } -- -- r.Err = KError(kerr) -- -- r.MemberAssignment, err = pd.getBytes() -- return --} -- --func (r *SyncGroupResponse) key() int16 { -- return 14 --} -- --func (r *SyncGroupResponse) version() int16 { -- return 0 --} -- --func (r *SyncGroupResponse) 
requiredVersion() KafkaVersion { -- return V0_9_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go -deleted file mode 100644 -index dd096b6db6719..0000000000000 ---- a/vendor/github.com/Shopify/sarama/sync_producer.go -+++ /dev/null -@@ -1,164 +0,0 @@ --package sarama -- --import ""sync"" -- --// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct --// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer --// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. --// --// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual --// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`. --// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. --// --// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to --// be set to true in its configuration. --type SyncProducer interface { -- -- // SendMessage produces a given message, and returns only when it either has -- // succeeded or failed to produce. It will return the partition and the offset -- // of the produced message, or an error if the message failed to produce. -- SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) -- -- // SendMessages produces a given set of messages, and returns only when all -- // messages in the set have either succeeded or failed. Note that messages -- // can succeed and fail individually; if some succeed and some fail, -- // SendMessages will return an error. -- SendMessages(msgs []*ProducerMessage) error -- -- // Close shuts down the producer and waits for any buffered messages to be -- // flushed. You must call this function before a producer object passes out of -- // scope, as it may otherwise leak memory. You must call this before calling -- // Close on the underlying client. -- Close() error --} -- --type syncProducer struct { -- producer *asyncProducer -- wg sync.WaitGroup --} -- --// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. --func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { -- if config == nil { -- config = NewConfig() -- config.Producer.Return.Successes = true -- } -- -- if err := verifyProducerConfig(config); err != nil { -- return nil, err -- } -- -- p, err := NewAsyncProducer(addrs, config) -- if err != nil { -- return nil, err -- } -- return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil --} -- --// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still --// necessary to call Close() on the underlying client when shutting down this producer. 
--func NewSyncProducerFromClient(client Client) (SyncProducer, error) { -- if err := verifyProducerConfig(client.Config()); err != nil { -- return nil, err -- } -- -- p, err := NewAsyncProducerFromClient(client) -- if err != nil { -- return nil, err -- } -- return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil --} -- --func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { -- sp := &syncProducer{producer: p} -- -- sp.wg.Add(2) -- go withRecover(sp.handleSuccesses) -- go withRecover(sp.handleErrors) -- -- return sp --} -- --func verifyProducerConfig(config *Config) error { -- if !config.Producer.Return.Errors { -- return ConfigurationError(""Producer.Return.Errors must be true to be used in a SyncProducer"") -- } -- if !config.Producer.Return.Successes { -- return ConfigurationError(""Producer.Return.Successes must be true to be used in a SyncProducer"") -- } -- return nil --} -- --func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { -- oldMetadata := msg.Metadata -- defer func() { -- msg.Metadata = oldMetadata -- }() -- -- expectation := make(chan *ProducerError, 1) -- msg.Metadata = expectation -- sp.producer.Input() <- msg -- -- if err := <-expectation; err != nil { -- return -1, -1, err.Err -- } -- -- return msg.Partition, msg.Offset, nil --} -- --func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { -- savedMetadata := make([]interface{}, len(msgs)) -- for i := range msgs { -- savedMetadata[i] = msgs[i].Metadata -- } -- defer func() { -- for i := range msgs { -- msgs[i].Metadata = savedMetadata[i] -- } -- }() -- -- expectations := make(chan chan *ProducerError, len(msgs)) -- go func() { -- for _, msg := range msgs { -- expectation := make(chan *ProducerError, 1) -- msg.Metadata = expectation -- sp.producer.Input() <- msg -- expectations <- expectation -- } -- close(expectations) -- }() -- -- var errors ProducerErrors -- for expectation := range expectations { -- if err := <-expectation; err != nil { -- errors = append(errors, err) -- } -- } -- -- if len(errors) > 0 { -- return errors -- } -- return nil --} -- --func (sp *syncProducer) handleSuccesses() { -- defer sp.wg.Done() -- for msg := range sp.producer.Successes() { -- expectation := msg.Metadata.(chan *ProducerError) -- expectation <- nil -- } --} -- --func (sp *syncProducer) handleErrors() { -- defer sp.wg.Done() -- for err := range sp.producer.Errors() { -- expectation := err.Msg.Metadata.(chan *ProducerError) -- expectation <- err -- } --} -- --func (sp *syncProducer) Close() error { -- sp.producer.AsyncClose() -- sp.wg.Wait() -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go -deleted file mode 100644 -index 372278d0bfa22..0000000000000 ---- a/vendor/github.com/Shopify/sarama/timestamp.go -+++ /dev/null -@@ -1,40 +0,0 @@ --package sarama -- --import ( -- ""fmt"" -- ""time"" --) -- --type Timestamp struct { -- *time.Time --} -- --func (t Timestamp) encode(pe packetEncoder) error { -- timestamp := int64(-1) -- -- if !t.Before(time.Unix(0, 0)) { -- timestamp = t.UnixNano() / int64(time.Millisecond) -- } else if !t.IsZero() { -- return PacketEncodingError{fmt.Sprintf(""invalid timestamp (%v)"", t)} -- } -- -- pe.putInt64(timestamp) -- return nil --} -- --func (t Timestamp) decode(pd packetDecoder) error { -- millis, err := pd.getInt64() -- if err != nil { -- return err -- } -- -- // negative timestamps are invalid, in these cases we should return -- // a zero time 
-- timestamp := time.Time{} -- if millis >= 0 { -- timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) -- } -- -- *t.Time = timestamp -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go -deleted file mode 100644 -index 71e95b814cb33..0000000000000 ---- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go -+++ /dev/null -@@ -1,126 +0,0 @@ --package sarama -- --type TxnOffsetCommitRequest struct { -- TransactionalID string -- GroupID string -- ProducerID int64 -- ProducerEpoch int16 -- Topics map[string][]*PartitionOffsetMetadata --} -- --func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { -- if err := pe.putString(t.TransactionalID); err != nil { -- return err -- } -- if err := pe.putString(t.GroupID); err != nil { -- return err -- } -- pe.putInt64(t.ProducerID) -- pe.putInt16(t.ProducerEpoch) -- -- if err := pe.putArrayLength(len(t.Topics)); err != nil { -- return err -- } -- for topic, partitions := range t.Topics { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putArrayLength(len(partitions)); err != nil { -- return err -- } -- for _, partition := range partitions { -- if err := partition.encode(pe); err != nil { -- return err -- } -- } -- } -- -- return nil --} -- --func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { -- if t.TransactionalID, err = pd.getString(); err != nil { -- return err -- } -- if t.GroupID, err = pd.getString(); err != nil { -- return err -- } -- if t.ProducerID, err = pd.getInt64(); err != nil { -- return err -- } -- if t.ProducerEpoch, err = pd.getInt16(); err != nil { -- return err -- } -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- t.Topics = make(map[string][]*PartitionOffsetMetadata) -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- -- m, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- t.Topics[topic] = make([]*PartitionOffsetMetadata, m) -- -- for j := 0; j < m; j++ { -- partitionOffsetMetadata := new(PartitionOffsetMetadata) -- if err := partitionOffsetMetadata.decode(pd, version); err != nil { -- return err -- } -- t.Topics[topic][j] = partitionOffsetMetadata -- } -- } -- -- return nil --} -- --func (a *TxnOffsetCommitRequest) key() int16 { -- return 28 --} -- --func (a *TxnOffsetCommitRequest) version() int16 { -- return 0 --} -- --func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -- --type PartitionOffsetMetadata struct { -- Partition int32 -- Offset int64 -- Metadata *string --} -- --func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { -- pe.putInt32(p.Partition) -- pe.putInt64(p.Offset) -- if err := pe.putNullableString(p.Metadata); err != nil { -- return err -- } -- -- return nil --} -- --func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { -- if p.Partition, err = pd.getInt32(); err != nil { -- return err -- } -- if p.Offset, err = pd.getInt64(); err != nil { -- return err -- } -- if p.Metadata, err = pd.getNullableString(); err != nil { -- return err -- } -- -- return nil --} -diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go -deleted file mode 100644 -index 6c980f4066f74..0000000000000 ---- 
a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go -+++ /dev/null -@@ -1,83 +0,0 @@ --package sarama -- --import ( -- ""time"" --) -- --type TxnOffsetCommitResponse struct { -- ThrottleTime time.Duration -- Topics map[string][]*PartitionError --} -- --func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { -- pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) -- if err := pe.putArrayLength(len(t.Topics)); err != nil { -- return err -- } -- -- for topic, e := range t.Topics { -- if err := pe.putString(topic); err != nil { -- return err -- } -- if err := pe.putArrayLength(len(e)); err != nil { -- return err -- } -- for _, partitionError := range e { -- if err := partitionError.encode(pe); err != nil { -- return err -- } -- } -- } -- -- return nil --} -- --func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { -- throttleTime, err := pd.getInt32() -- if err != nil { -- return err -- } -- t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond -- -- n, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- t.Topics = make(map[string][]*PartitionError) -- -- for i := 0; i < n; i++ { -- topic, err := pd.getString() -- if err != nil { -- return err -- } -- -- m, err := pd.getArrayLength() -- if err != nil { -- return err -- } -- -- t.Topics[topic] = make([]*PartitionError, m) -- -- for j := 0; j < m; j++ { -- t.Topics[topic][j] = new(PartitionError) -- if err := t.Topics[topic][j].decode(pd, version); err != nil { -- return err -- } -- } -- } -- -- return nil --} -- --func (a *TxnOffsetCommitResponse) key() int16 { -- return 28 --} -- --func (a *TxnOffsetCommitResponse) version() int16 { -- return 0 --} -- --func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { -- return V0_11_0_0 --} -diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go -deleted file mode 100644 -index 9d7b60f161485..0000000000000 ---- a/vendor/github.com/Shopify/sarama/utils.go -+++ /dev/null -@@ -1,184 +0,0 @@ --package sarama -- --import ( -- ""bufio"" -- ""fmt"" -- ""net"" -- ""regexp"" --) -- --type none struct{} -- --// make []int32 sortable so we can sort partition numbers --type int32Slice []int32 -- --func (slice int32Slice) Len() int { -- return len(slice) --} -- --func (slice int32Slice) Less(i, j int) bool { -- return slice[i] < slice[j] --} -- --func (slice int32Slice) Swap(i, j int) { -- slice[i], slice[j] = slice[j], slice[i] --} -- --func dupInt32Slice(input []int32) []int32 { -- ret := make([]int32, 0, len(input)) -- for _, val := range input { -- ret = append(ret, val) -- } -- return ret --} -- --func withRecover(fn func()) { -- defer func() { -- handler := PanicHandler -- if handler != nil { -- if err := recover(); err != nil { -- handler(err) -- } -- } -- }() -- -- fn() --} -- --func safeAsyncClose(b *Broker) { -- tmp := b // local var prevents clobbering in goroutine -- go withRecover(func() { -- if connected, _ := tmp.Connected(); connected { -- if err := tmp.Close(); err != nil { -- Logger.Println(""Error closing broker"", tmp.ID(), "":"", err) -- } -- } -- }) --} -- --// Encoder is a simple interface for any type that can be encoded as an array of bytes --// in order to be sent as the key or value of a Kafka message. Length() is provided as an --// optimization, and must return the same as len() on the result of Encode(). 
--type Encoder interface { -- Encode() ([]byte, error) -- Length() int --} -- --// make strings and byte slices encodable for convenience so they can be used as keys --// and/or values in kafka messages -- --// StringEncoder implements the Encoder interface for Go strings so that they can be used --// as the Key or Value in a ProducerMessage. --type StringEncoder string -- --func (s StringEncoder) Encode() ([]byte, error) { -- return []byte(s), nil --} -- --func (s StringEncoder) Length() int { -- return len(s) --} -- --// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used --// as the Key or Value in a ProducerMessage. --type ByteEncoder []byte -- --func (b ByteEncoder) Encode() ([]byte, error) { -- return b, nil --} -- --func (b ByteEncoder) Length() int { -- return len(b) --} -- --// bufConn wraps a net.Conn with a buffer for reads to reduce the number of --// reads that trigger syscalls. --type bufConn struct { -- net.Conn -- buf *bufio.Reader --} -- --func newBufConn(conn net.Conn) *bufConn { -- return &bufConn{ -- Conn: conn, -- buf: bufio.NewReader(conn), -- } --} -- --func (bc *bufConn) Read(b []byte) (n int, err error) { -- return bc.buf.Read(b) --} -- --// KafkaVersion instances represent versions of the upstream Kafka broker. --type KafkaVersion struct { -- // it's a struct rather than just typing the array directly to make it opaque and stop people -- // generating their own arbitrary versions -- version [4]uint --} -- --func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { -- return KafkaVersion{ -- version: [4]uint{major, minor, veryMinor, patch}, -- } --} -- --// IsAtLeast return true if and only if the version it is called on is --// greater than or equal to the version passed in: --// V1.IsAtLeast(V2) // false --// V2.IsAtLeast(V1) // true --func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { -- for i := range v.version { -- if v.version[i] > other.version[i] { -- return true -- } else if v.version[i] < other.version[i] { -- return false -- } -- } -- return true --} -- --// Effective constants defining the supported kafka versions. 
--var ( -- V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) -- V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) -- V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) -- V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) -- V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) -- V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) -- V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) -- V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) -- V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) -- V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) -- V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) -- minVersion = V0_8_2_0 --) -- --func ParseKafkaVersion(s string) (KafkaVersion, error) { -- var major, minor, veryMinor, patch uint -- var err error -- if s[0] == '0' { -- err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, ""0.%d.%d.%d"", [3]*uint{&minor, &veryMinor, &patch}) -- } else { -- err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, ""%d.%d.%d"", [3]*uint{&major, &minor, &veryMinor}) -- } -- if err != nil { -- return minVersion, err -- } -- return newKafkaVersion(major, minor, veryMinor, patch), nil --} -- --func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { -- if !regexp.MustCompile(pattern).MatchString(s) { -- return fmt.Errorf(""invalid version `%s`"", s) -- } -- _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) -- return err --} -- --func (v KafkaVersion) String() string { -- if v.version[0] == 0 { -- return fmt.Sprintf(""0.%d.%d.%d"", v.version[1], v.version[2], v.version[3]) -- } else { -- return fmt.Sprintf(""%d.%d.%d"", v.version[0], v.version[1], v.version[2]) -- } --} -diff --git a/vendor/github.com/apache/thrift/LICENSE b/vendor/github.com/apache/thrift/LICENSE -deleted file mode 100644 -index 3b6d7d74cc9b1..0000000000000 ---- a/vendor/github.com/apache/thrift/LICENSE -+++ /dev/null -@@ -1,239 +0,0 @@ -- -- Apache License -- Version 2.0, January 2004 -- http://www.apache.org/licenses/ -- -- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -- -- 1. Definitions. -- -- ""License"" shall mean the terms and conditions for use, reproduction, -- and distribution as defined by Sections 1 through 9 of this document. -- -- ""Licensor"" shall mean the copyright owner or entity authorized by -- the copyright owner that is granting the License. -- -- ""Legal Entity"" shall mean the union of the acting entity and all -- other entities that control, are controlled by, or are under common -- control with that entity. For the purposes of this definition, -- ""control"" means (i) the power, direct or indirect, to cause the -- direction or management of such entity, whether by contract or -- otherwise, or (ii) ownership of fifty percent (50%) or more of the -- outstanding shares, or (iii) beneficial ownership of such entity. -- -- ""You"" (or ""Your"") shall mean an individual or Legal Entity -- exercising permissions granted by this License. -- -- ""Source"" form shall mean the preferred form for making modifications, -- including but not limited to software source code, documentation -- source, and configuration files. -- -- ""Object"" form shall mean any form resulting from mechanical -- transformation or translation of a Source form, including but -- not limited to compiled object code, generated documentation, -- and conversions to other media types. -- -- ""Work"" shall mean the work of authorship, whether in Source or -- Object form, made available under the License, as indicated by a -- copyright notice that is included in or attached to the work -- (an example is provided in the Appendix below). 
-- -- ""Derivative Works"" shall mean any work, whether in Source or Object -- form, that is based on (or derived from) the Work and for which the -- editorial revisions, annotations, elaborations, or other modifications -- represent, as a whole, an original work of authorship. For the purposes -- of this License, Derivative Works shall not include works that remain -- separable from, or merely link (or bind by name) to the interfaces of, -- the Work and Derivative Works thereof. -- -- ""Contribution"" shall mean any work of authorship, including -- the original version of the Work and any modifications or additions -- to that Work or Derivative Works thereof, that is intentionally -- submitted to Licensor for inclusion in the Work by the copyright owner -- or by an individual or Legal Entity authorized to submit on behalf of -- the copyright owner. For the purposes of this definition, ""submitted"" -- means any form of electronic, verbal, or written communication sent -- to the Licensor or its representatives, including but not limited to -- communication on electronic mailing lists, source code control systems, -- and issue tracking systems that are managed by, or on behalf of, the -- Licensor for the purpose of discussing and improving the Work, but -- excluding communication that is conspicuously marked or otherwise -- designated in writing by the copyright owner as ""Not a Contribution."" -- -- ""Contributor"" shall mean Licensor and any individual or Legal Entity -- on behalf of whom a Contribution has been received by Licensor and -- subsequently incorporated within the Work. -- -- 2. Grant of Copyright License. Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- copyright license to reproduce, prepare Derivative Works of, -- publicly display, publicly perform, sublicense, and distribute the -- Work and such Derivative Works in Source or Object form. -- -- 3. Grant of Patent License. Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- (except as stated in this section) patent license to make, have made, -- use, offer to sell, sell, import, and otherwise transfer the Work, -- where such license applies only to those patent claims licensable -- by such Contributor that are necessarily infringed by their -- Contribution(s) alone or by combination of their Contribution(s) -- with the Work to which such Contribution(s) was submitted. If You -- institute patent litigation against any entity (including a -- cross-claim or counterclaim in a lawsuit) alleging that the Work -- or a Contribution incorporated within the Work constitutes direct -- or contributory patent infringement, then any patent licenses -- granted to You under this License for that Work shall terminate -- as of the date such litigation is filed. -- -- 4. Redistribution. 
You may reproduce and distribute copies of the -- Work or Derivative Works thereof in any medium, with or without -- modifications, and in Source or Object form, provided that You -- meet the following conditions: -- -- (a) You must give any other recipients of the Work or -- Derivative Works a copy of this License; and -- -- (b) You must cause any modified files to carry prominent notices -- stating that You changed the files; and -- -- (c) You must retain, in the Source form of any Derivative Works -- that You distribute, all copyright, patent, trademark, and -- attribution notices from the Source form of the Work, -- excluding those notices that do not pertain to any part of -- the Derivative Works; and -- -- (d) If the Work includes a ""NOTICE"" text file as part of its -- distribution, then any Derivative Works that You distribute must -- include a readable copy of the attribution notices contained -- within such NOTICE file, excluding those notices that do not -- pertain to any part of the Derivative Works, in at least one -- of the following places: within a NOTICE text file distributed -- as part of the Derivative Works; within the Source form or -- documentation, if provided along with the Derivative Works; or, -- within a display generated by the Derivative Works, if and -- wherever such third-party notices normally appear. The contents -- of the NOTICE file are for informational purposes only and -- do not modify the License. You may add Your own attribution -- notices within Derivative Works that You distribute, alongside -- or as an addendum to the NOTICE text from the Work, provided -- that such additional attribution notices cannot be construed -- as modifying the License. -- -- You may add Your own copyright statement to Your modifications and -- may provide additional or different license terms and conditions -- for use, reproduction, or distribution of Your modifications, or -- for any such Derivative Works as a whole, provided Your use, -- reproduction, and distribution of the Work otherwise complies with -- the conditions stated in this License. -- -- 5. Submission of Contributions. Unless You explicitly state otherwise, -- any Contribution intentionally submitted for inclusion in the Work -- by You to the Licensor shall be under the terms and conditions of -- this License, without any additional terms or conditions. -- Notwithstanding the above, nothing herein shall supersede or modify -- the terms of any separate license agreement you may have executed -- with Licensor regarding such Contributions. -- -- 6. Trademarks. This License does not grant permission to use the trade -- names, trademarks, service marks, or product names of the Licensor, -- except as required for reasonable and customary use in describing the -- origin of the Work and reproducing the content of the NOTICE file. -- -- 7. Disclaimer of Warranty. Unless required by applicable law or -- agreed to in writing, Licensor provides the Work (and each -- Contributor provides its Contributions) on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -- implied, including, without limitation, any warranties or conditions -- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -- PARTICULAR PURPOSE. You are solely responsible for determining the -- appropriateness of using or redistributing the Work and assume any -- risks associated with Your exercise of permissions under this License. -- -- 8. Limitation of Liability. 
In no event and under no legal theory, -- whether in tort (including negligence), contract, or otherwise, -- unless required by applicable law (such as deliberate and grossly -- negligent acts) or agreed to in writing, shall any Contributor be -- liable to You for damages, including any direct, indirect, special, -- incidental, or consequential damages of any character arising as a -- result of this License or out of the use or inability to use the -- Work (including but not limited to damages for loss of goodwill, -- work stoppage, computer failure or malfunction, or any and all -- other commercial damages or losses), even if such Contributor -- has been advised of the possibility of such damages. -- -- 9. Accepting Warranty or Additional Liability. While redistributing -- the Work or Derivative Works thereof, You may choose to offer, -- and charge a fee for, acceptance of support, warranty, indemnity, -- or other liability obligations and/or rights consistent with this -- License. However, in accepting such obligations, You may act only -- on Your own behalf and on Your sole responsibility, not on behalf -- of any other Contributor, and only if You agree to indemnify, -- defend, and hold each Contributor harmless for any liability -- incurred by, or claims asserted against, such Contributor by reason -- of your accepting any such warranty or additional liability. -- -- END OF TERMS AND CONDITIONS -- -- APPENDIX: How to apply the Apache License to your work. -- -- To apply the Apache License to your work, attach the following -- boilerplate notice, with the fields enclosed by brackets ""[]"" -- replaced with your own identifying information. (Don't include -- the brackets!) The text should be enclosed in the appropriate -- comment syntax for the file format. We also recommend that a -- file or class name and description of purpose be included on the -- same ""printed page"" as the copyright notice for easier -- identification within third-party archives. -- -- Copyright [yyyy] [name of copyright owner] -- -- Licensed under the Apache License, Version 2.0 (the ""License""); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- ---------------------------------------------------- --SOFTWARE DISTRIBUTED WITH THRIFT: -- --The Apache Thrift software includes a number of subcomponents with --separate copyright notices and license terms. Your use of the source --code for the these subcomponents is subject to the terms and --conditions of the following licenses. -- ---------------------------------------------------- --Portions of the following files are licensed under the MIT License: -- -- lib/erl/src/Makefile.am -- --Please see doc/otp-base-license.txt for the full terms of this license. -- ---------------------------------------------------- --For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: -- --# Copyright (c) 2007 Thomas Porschberg --# --# Copying and distribution of this file, with or without --# modification, are permitted in any medium without royalty provided --# the copyright notice and this notice are preserved. 
-- ---------------------------------------------------- --For the lib/nodejs/lib/thrift/json_parse.js: -- --/* -- json_parse.js -- 2015-05-02 -- Public Domain. -- NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -- --*/ --(By Douglas Crockford ) ---------------------------------------------------- -diff --git a/vendor/github.com/apache/thrift/NOTICE b/vendor/github.com/apache/thrift/NOTICE -deleted file mode 100644 -index 902dc8d3140f7..0000000000000 ---- a/vendor/github.com/apache/thrift/NOTICE -+++ /dev/null -@@ -1,5 +0,0 @@ --Apache Thrift --Copyright 2006-2017 The Apache Software Foundation. -- --This product includes software developed at --The Apache Software Foundation (http://www.apache.org/). -diff --git a/vendor/github.com/apache/thrift/contrib/fb303/LICENSE b/vendor/github.com/apache/thrift/contrib/fb303/LICENSE -deleted file mode 100644 -index 4eacb64317973..0000000000000 ---- a/vendor/github.com/apache/thrift/contrib/fb303/LICENSE -+++ /dev/null -@@ -1,16 +0,0 @@ --Licensed to the Apache Software Foundation (ASF) under one --or more contributor license agreements. See the NOTICE file --distributed with this work for additional information --regarding copyright ownership. The ASF licenses this file --to you under the Apache License, Version 2.0 (the --""License""); you may not use this file except in compliance --with the License. You may obtain a copy of the License at -- --http://www.apache.org/licenses/LICENSE-2.0 -- --Unless required by applicable law or agreed to in writing, --software distributed under the License is distributed on an --""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --KIND, either express or implied. See the License for the --specific language governing permissions and limitations --under the License. -diff --git a/vendor/github.com/apache/thrift/debian/copyright b/vendor/github.com/apache/thrift/debian/copyright -deleted file mode 100644 -index 850643c9aa4ba..0000000000000 ---- a/vendor/github.com/apache/thrift/debian/copyright -+++ /dev/null -@@ -1,129 +0,0 @@ --This package was debianized by Thrift Developer's . -- -- --This package and the Debian packaging is licensed under the Apache License, --see `/usr/share/common-licenses/Apache-2.0'. -- --The following information was copied from Apache Thrift LICENSE file. -- ---------------------------------------------------- --SOFTWARE DISTRIBUTED WITH THRIFT: -- --The Apache Thrift software includes a number of subcomponents with --separate copyright notices and license terms. Your use of the source --code for the these subcomponents is subject to the terms and --conditions of the following licenses. -- ---------------------------------------------------- --Portions of the following files are licensed under the MIT License: -- -- lib/erl/src/Makefile.am -- --Please see doc/otp-base-license.txt for the full terms of this license. 
-- -- ---------------------------------------------------- --The following files contain some portions of code contributed under --the Thrift Software License (see doc/old-thrift-license.txt), and relicensed --under the Apache 2.0 License: -- -- compiler/cpp/Makefile.am -- compiler/cpp/src/generate/t_cocoa_generator.cc -- compiler/cpp/src/generate/t_cpp_generator.cc -- compiler/cpp/src/generate/t_csharp_generator.cc -- compiler/cpp/src/generate/t_erl_generator.cc -- compiler/cpp/src/generate/t_hs_generator.cc -- compiler/cpp/src/generate/t_java_generator.cc -- compiler/cpp/src/generate/t_ocaml_generator.cc -- compiler/cpp/src/generate/t_perl_generator.cc -- compiler/cpp/src/generate/t_php_generator.cc -- compiler/cpp/src/generate/t_py_generator.cc -- compiler/cpp/src/generate/t_rb_generator.cc -- compiler/cpp/src/generate/t_st_generator.cc -- compiler/cpp/src/generate/t_xsd_generator.cc -- compiler/cpp/src/main.cc -- compiler/cpp/src/parse/t_field.h -- compiler/cpp/src/parse/t_program.h -- compiler/cpp/src/platform.h -- compiler/cpp/src/thriftl.ll -- compiler/cpp/src/thrifty.yy -- lib/csharp/src/Protocol/TBinaryProtocol.cs -- lib/csharp/src/Protocol/TField.cs -- lib/csharp/src/Protocol/TList.cs -- lib/csharp/src/Protocol/TMap.cs -- lib/csharp/src/Protocol/TMessage.cs -- lib/csharp/src/Protocol/TMessageType.cs -- lib/csharp/src/Protocol/TProtocol.cs -- lib/csharp/src/Protocol/TProtocolException.cs -- lib/csharp/src/Protocol/TProtocolFactory.cs -- lib/csharp/src/Protocol/TProtocolUtil.cs -- lib/csharp/src/Protocol/TSet.cs -- lib/csharp/src/Protocol/TStruct.cs -- lib/csharp/src/Protocol/TType.cs -- lib/csharp/src/Server/TServer.cs -- lib/csharp/src/Server/TSimpleServer.cs -- lib/csharp/src/Server/TThreadPoolServer.cs -- lib/csharp/src/TApplicationException.cs -- lib/csharp/src/Thrift.csproj -- lib/csharp/src/Thrift.sln -- lib/csharp/src/TProcessor.cs -- lib/csharp/src/Transport/TServerSocket.cs -- lib/csharp/src/Transport/TServerTransport.cs -- lib/csharp/src/Transport/TSocket.cs -- lib/csharp/src/Transport/TStreamTransport.cs -- lib/csharp/src/Transport/TTransport.cs -- lib/csharp/src/Transport/TTransportException.cs -- lib/csharp/src/Transport/TTransportFactory.cs -- lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs -- lib/csharp/ThriftMSBuildTask/ThriftBuild.cs -- lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj -- lib/rb/lib/thrift.rb -- lib/st/README -- lib/st/thrift.st -- test/OptionalRequiredTest.cpp -- test/OptionalRequiredTest.thrift -- test/ThriftTest.thrift -- ---------------------------------------------------- --For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: -- --# Copyright (c) 2007 Thomas Porschberg --# --# Copying and distribution of this file, with or without --# modification, are permitted in any medium without royalty provided --# the copyright notice and this notice are preserved. -- ---------------------------------------------------- --For the compiler/cpp/src/md5.[ch] components: -- --/* -- Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. -- -- This software is provided 'as-is', without any express or implied -- warranty. In no event will the authors be held liable for any damages -- arising from the use of this software. -- -- Permission is granted to anyone to use this software for any purpose, -- including commercial applications, and to alter it and redistribute it -- freely, subject to the following restrictions: -- -- 1. 
The origin of this software must not be misrepresented; you must not -- claim that you wrote the original software. If you use this software -- in a product, an acknowledgment in the product documentation would be -- appreciated but is not required. -- 2. Altered source versions must be plainly marked as such, and must not be -- misrepresented as being the original software. -- 3. This notice may not be removed or altered from any source distribution. -- -- L. Peter Deutsch -- ghost@aladdin.com -- -- */ -- ----------------------------------------------------- --For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki, --lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components: -- Copyright (C) 1999 - 2007 Markus Mottl -- --Licensed under the terms of the GNU Lesser General Public License 2.1 --(see doc/lgpl-2.1.txt for the full terms of this license) -diff --git a/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER b/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER -deleted file mode 100644 -index 4eacb64317973..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER -+++ /dev/null -@@ -1,16 +0,0 @@ --Licensed to the Apache Software Foundation (ASF) under one --or more contributor license agreements. See the NOTICE file --distributed with this work for additional information --regarding copyright ownership. The ASF licenses this file --to you under the Apache License, Version 2.0 (the --""License""); you may not use this file except in compliance --with the License. You may obtain a copy of the License at -- --http://www.apache.org/licenses/LICENSE-2.0 -- --Unless required by applicable law or agreed to in writing, --software distributed under the License is distributed on an --""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --KIND, either express or implied. See the License for the --specific language governing permissions and limitations --under the License. -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go -deleted file mode 100644 -index b9d7eedcdd7ec..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go -+++ /dev/null -@@ -1,164 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. 
-- */ -- --package thrift -- --const ( -- UNKNOWN_APPLICATION_EXCEPTION = 0 -- UNKNOWN_METHOD = 1 -- INVALID_MESSAGE_TYPE_EXCEPTION = 2 -- WRONG_METHOD_NAME = 3 -- BAD_SEQUENCE_ID = 4 -- MISSING_RESULT = 5 -- INTERNAL_ERROR = 6 -- PROTOCOL_ERROR = 7 --) -- --var defaultApplicationExceptionMessage = map[int32]string{ -- UNKNOWN_APPLICATION_EXCEPTION: ""unknown application exception"", -- UNKNOWN_METHOD: ""unknown method"", -- INVALID_MESSAGE_TYPE_EXCEPTION: ""invalid message type"", -- WRONG_METHOD_NAME: ""wrong method name"", -- BAD_SEQUENCE_ID: ""bad sequence ID"", -- MISSING_RESULT: ""missing result"", -- INTERNAL_ERROR: ""unknown internal error"", -- PROTOCOL_ERROR: ""unknown protocol error"", --} -- --// Application level Thrift exception --type TApplicationException interface { -- TException -- TypeId() int32 -- Read(iprot TProtocol) error -- Write(oprot TProtocol) error --} -- --type tApplicationException struct { -- message string -- type_ int32 --} -- --func (e tApplicationException) Error() string { -- if e.message != """" { -- return e.message -- } -- return defaultApplicationExceptionMessage[e.type_] --} -- --func NewTApplicationException(type_ int32, message string) TApplicationException { -- return &tApplicationException{message, type_} --} -- --func (p *tApplicationException) TypeId() int32 { -- return p.type_ --} -- --func (p *tApplicationException) Read(iprot TProtocol) error { -- // TODO: this should really be generated by the compiler -- _, err := iprot.ReadStructBegin() -- if err != nil { -- return err -- } -- -- message := """" -- type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) -- -- for { -- _, ttype, id, err := iprot.ReadFieldBegin() -- if err != nil { -- return err -- } -- if ttype == STOP { -- break -- } -- switch id { -- case 1: -- if ttype == STRING { -- if message, err = iprot.ReadString(); err != nil { -- return err -- } -- } else { -- if err = SkipDefaultDepth(iprot, ttype); err != nil { -- return err -- } -- } -- case 2: -- if ttype == I32 { -- if type_, err = iprot.ReadI32(); err != nil { -- return err -- } -- } else { -- if err = SkipDefaultDepth(iprot, ttype); err != nil { -- return err -- } -- } -- default: -- if err = SkipDefaultDepth(iprot, ttype); err != nil { -- return err -- } -- } -- if err = iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return err -- } -- -- p.message = message -- p.type_ = type_ -- -- return nil --} -- --func (p *tApplicationException) Write(oprot TProtocol) (err error) { -- err = oprot.WriteStructBegin(""TApplicationException"") -- if len(p.Error()) > 0 { -- err = oprot.WriteFieldBegin(""message"", STRING, 1) -- if err != nil { -- return -- } -- err = oprot.WriteString(p.Error()) -- if err != nil { -- return -- } -- err = oprot.WriteFieldEnd() -- if err != nil { -- return -- } -- } -- err = oprot.WriteFieldBegin(""type"", I32, 2) -- if err != nil { -- return -- } -- err = oprot.WriteI32(p.type_) -- if err != nil { -- return -- } -- err = oprot.WriteFieldEnd() -- if err != nil { -- return -- } -- err = oprot.WriteFieldStop() -- if err != nil { -- return -- } -- err = oprot.WriteStructEnd() -- return --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go -deleted file mode 100644 -index 690d341111b5f..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go -+++ /dev/null -@@ -1,514 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) 
under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""bytes"" -- ""encoding/binary"" -- ""errors"" -- ""fmt"" -- ""io"" -- ""math"" --) -- --type TBinaryProtocol struct { -- trans TRichTransport -- origTransport TTransport -- reader io.Reader -- writer io.Writer -- strictRead bool -- strictWrite bool -- buffer [64]byte --} -- --type TBinaryProtocolFactory struct { -- strictRead bool -- strictWrite bool --} -- --func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { -- return NewTBinaryProtocol(t, false, true) --} -- --func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { -- p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} -- if et, ok := t.(TRichTransport); ok { -- p.trans = et -- } else { -- p.trans = NewTRichTransport(t) -- } -- p.reader = p.trans -- p.writer = p.trans -- return p --} -- --func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { -- return NewTBinaryProtocolFactory(false, true) --} -- --func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { -- return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} --} -- --func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { -- return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) --} -- --/** -- * Writing Methods -- */ -- --func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { -- if p.strictWrite { -- version := uint32(VERSION_1) | uint32(typeId) -- e := p.WriteI32(int32(version)) -- if e != nil { -- return e -- } -- e = p.WriteString(name) -- if e != nil { -- return e -- } -- e = p.WriteI32(seqId) -- return e -- } else { -- e := p.WriteString(name) -- if e != nil { -- return e -- } -- e = p.WriteByte(int8(typeId)) -- if e != nil { -- return e -- } -- e = p.WriteI32(seqId) -- return e -- } -- return nil --} -- --func (p *TBinaryProtocol) WriteMessageEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) WriteStructBegin(name string) error { -- return nil --} -- --func (p *TBinaryProtocol) WriteStructEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { -- e := p.WriteByte(int8(typeId)) -- if e != nil { -- return e -- } -- e = p.WriteI16(id) -- return e --} -- --func (p *TBinaryProtocol) WriteFieldEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) WriteFieldStop() error { -- e := p.WriteByte(STOP) -- return e --} -- --func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { -- e := p.WriteByte(int8(keyType)) -- if e != nil { -- return e -- } -- e = p.WriteByte(int8(valueType)) -- if e != nil { -- return e -- } -- e = p.WriteI32(int32(size)) -- return 
e --} -- --func (p *TBinaryProtocol) WriteMapEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error { -- e := p.WriteByte(int8(elemType)) -- if e != nil { -- return e -- } -- e = p.WriteI32(int32(size)) -- return e --} -- --func (p *TBinaryProtocol) WriteListEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { -- e := p.WriteByte(int8(elemType)) -- if e != nil { -- return e -- } -- e = p.WriteI32(int32(size)) -- return e --} -- --func (p *TBinaryProtocol) WriteSetEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) WriteBool(value bool) error { -- if value { -- return p.WriteByte(1) -- } -- return p.WriteByte(0) --} -- --func (p *TBinaryProtocol) WriteByte(value int8) error { -- e := p.trans.WriteByte(byte(value)) -- return NewTProtocolException(e) --} -- --func (p *TBinaryProtocol) WriteI16(value int16) error { -- v := p.buffer[0:2] -- binary.BigEndian.PutUint16(v, uint16(value)) -- _, e := p.writer.Write(v) -- return NewTProtocolException(e) --} -- --func (p *TBinaryProtocol) WriteI32(value int32) error { -- v := p.buffer[0:4] -- binary.BigEndian.PutUint32(v, uint32(value)) -- _, e := p.writer.Write(v) -- return NewTProtocolException(e) --} -- --func (p *TBinaryProtocol) WriteI64(value int64) error { -- v := p.buffer[0:8] -- binary.BigEndian.PutUint64(v, uint64(value)) -- _, err := p.writer.Write(v) -- return NewTProtocolException(err) --} -- --func (p *TBinaryProtocol) WriteDouble(value float64) error { -- return p.WriteI64(int64(math.Float64bits(value))) --} -- --func (p *TBinaryProtocol) WriteString(value string) error { -- e := p.WriteI32(int32(len(value))) -- if e != nil { -- return e -- } -- _, err := p.trans.WriteString(value) -- return NewTProtocolException(err) --} -- --func (p *TBinaryProtocol) WriteBinary(value []byte) error { -- e := p.WriteI32(int32(len(value))) -- if e != nil { -- return e -- } -- _, err := p.writer.Write(value) -- return NewTProtocolException(err) --} -- --/** -- * Reading methods -- */ -- --func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { -- size, e := p.ReadI32() -- if e != nil { -- return """", typeId, 0, NewTProtocolException(e) -- } -- if size < 0 { -- typeId = TMessageType(size & 0x0ff) -- version := int64(int64(size) & VERSION_MASK) -- if version != VERSION_1 { -- return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf(""Bad version in ReadMessageBegin"")) -- } -- name, e = p.ReadString() -- if e != nil { -- return name, typeId, seqId, NewTProtocolException(e) -- } -- seqId, e = p.ReadI32() -- if e != nil { -- return name, typeId, seqId, NewTProtocolException(e) -- } -- return name, typeId, seqId, nil -- } -- if p.strictRead { -- return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf(""Missing version in ReadMessageBegin"")) -- } -- name, e2 := p.readStringBody(size) -- if e2 != nil { -- return name, typeId, seqId, e2 -- } -- b, e3 := p.ReadByte() -- if e3 != nil { -- return name, typeId, seqId, e3 -- } -- typeId = TMessageType(b) -- seqId, e4 := p.ReadI32() -- if e4 != nil { -- return name, typeId, seqId, e4 -- } -- return name, typeId, seqId, nil --} -- --func (p *TBinaryProtocol) ReadMessageEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { -- return --} -- --func (p *TBinaryProtocol) ReadStructEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) 
ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { -- t, err := p.ReadByte() -- typeId = TType(t) -- if err != nil { -- return name, typeId, seqId, err -- } -- if t != STOP { -- seqId, err = p.ReadI16() -- } -- return name, typeId, seqId, err --} -- --func (p *TBinaryProtocol) ReadFieldEnd() error { -- return nil --} -- --var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New(""Invalid data length"")) -- --func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { -- k, e := p.ReadByte() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- kType = TType(k) -- v, e := p.ReadByte() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- vType = TType(v) -- size32, e := p.ReadI32() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- if size32 < 0 { -- err = invalidDataLength -- return -- } -- size = int(size32) -- return kType, vType, size, nil --} -- --func (p *TBinaryProtocol) ReadMapEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { -- b, e := p.ReadByte() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- elemType = TType(b) -- size32, e := p.ReadI32() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- if size32 < 0 { -- err = invalidDataLength -- return -- } -- size = int(size32) -- -- return --} -- --func (p *TBinaryProtocol) ReadListEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { -- b, e := p.ReadByte() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- elemType = TType(b) -- size32, e := p.ReadI32() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- if size32 < 0 { -- err = invalidDataLength -- return -- } -- size = int(size32) -- return elemType, size, nil --} -- --func (p *TBinaryProtocol) ReadSetEnd() error { -- return nil --} -- --func (p *TBinaryProtocol) ReadBool() (bool, error) { -- b, e := p.ReadByte() -- v := true -- if b != 1 { -- v = false -- } -- return v, e --} -- --func (p *TBinaryProtocol) ReadByte() (int8, error) { -- v, err := p.trans.ReadByte() -- return int8(v), err --} -- --func (p *TBinaryProtocol) ReadI16() (value int16, err error) { -- buf := p.buffer[0:2] -- err = p.readAll(buf) -- value = int16(binary.BigEndian.Uint16(buf)) -- return value, err --} -- --func (p *TBinaryProtocol) ReadI32() (value int32, err error) { -- buf := p.buffer[0:4] -- err = p.readAll(buf) -- value = int32(binary.BigEndian.Uint32(buf)) -- return value, err --} -- --func (p *TBinaryProtocol) ReadI64() (value int64, err error) { -- buf := p.buffer[0:8] -- err = p.readAll(buf) -- value = int64(binary.BigEndian.Uint64(buf)) -- return value, err --} -- --func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { -- buf := p.buffer[0:8] -- err = p.readAll(buf) -- value = math.Float64frombits(binary.BigEndian.Uint64(buf)) -- return value, err --} -- --func (p *TBinaryProtocol) ReadString() (value string, err error) { -- size, e := p.ReadI32() -- if e != nil { -- return """", e -- } -- if size < 0 { -- err = invalidDataLength -- return -- } -- -- return p.readStringBody(size) --} -- --func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { -- size, e := p.ReadI32() -- if e != nil { -- return nil, e -- } -- if size < 0 { -- return nil, invalidDataLength -- } -- if uint64(size) > p.trans.RemainingBytes() { -- return nil, invalidDataLength -- } -- -- isize := int(size) 
-- buf := make([]byte, isize) -- _, err := io.ReadFull(p.trans, buf) -- return buf, NewTProtocolException(err) --} -- --func (p *TBinaryProtocol) Flush() (err error) { -- return NewTProtocolException(p.trans.Flush()) --} -- --func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { -- return SkipDefaultDepth(p, fieldType) --} -- --func (p *TBinaryProtocol) Transport() TTransport { -- return p.origTransport --} -- --func (p *TBinaryProtocol) readAll(buf []byte) error { -- _, err := io.ReadFull(p.reader, buf) -- return NewTProtocolException(err) --} -- --const readLimit = 32768 -- --func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { -- if size < 0 { -- return """", nil -- } -- if uint64(size) > p.trans.RemainingBytes() { -- return """", invalidDataLength -- } -- -- var ( -- buf bytes.Buffer -- e error -- b []byte -- ) -- -- switch { -- case int(size) <= len(p.buffer): -- b = p.buffer[:size] // avoids allocation for small reads -- case int(size) < readLimit: -- b = make([]byte, size) -- default: -- b = make([]byte, readLimit) -- } -- -- for size > 0 { -- _, e = io.ReadFull(p.trans, b) -- buf.Write(b) -- if e != nil { -- break -- } -- size -= readLimit -- if size < readLimit && size > 0 { -- b = b[:size] -- } -- } -- return buf.String(), NewTProtocolException(e) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go -deleted file mode 100644 -index b754f925d0867..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go -+++ /dev/null -@@ -1,91 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. 
-- */ -- --package thrift -- --import ( -- ""bufio"" --) -- --type TBufferedTransportFactory struct { -- size int --} -- --type TBufferedTransport struct { -- bufio.ReadWriter -- tp TTransport --} -- --func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { -- return NewTBufferedTransport(trans, p.size), nil --} -- --func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { -- return &TBufferedTransportFactory{size: bufferSize} --} -- --func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { -- return &TBufferedTransport{ -- ReadWriter: bufio.ReadWriter{ -- Reader: bufio.NewReaderSize(trans, bufferSize), -- Writer: bufio.NewWriterSize(trans, bufferSize), -- }, -- tp: trans, -- } --} -- --func (p *TBufferedTransport) IsOpen() bool { -- return p.tp.IsOpen() --} -- --func (p *TBufferedTransport) Open() (err error) { -- return p.tp.Open() --} -- --func (p *TBufferedTransport) Close() (err error) { -- return p.tp.Close() --} -- --func (p *TBufferedTransport) Read(b []byte) (int, error) { -- n, err := p.ReadWriter.Read(b) -- if err != nil { -- p.ReadWriter.Reader.Reset(p.tp) -- } -- return n, err --} -- --func (p *TBufferedTransport) Write(b []byte) (int, error) { -- n, err := p.ReadWriter.Write(b) -- if err != nil { -- p.ReadWriter.Writer.Reset(p.tp) -- } -- return n, err --} -- --func (p *TBufferedTransport) Flush() error { -- if err := p.ReadWriter.Flush(); err != nil { -- p.ReadWriter.Writer.Reset(p.tp) -- return err -- } -- return p.tp.Flush() --} -- --func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { -- return p.tp.RemainingBytes() --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client.go b/vendor/github.com/apache/thrift/lib/go/thrift/client.go -deleted file mode 100644 -index 8bdb53d8d9f7c..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/client.go -+++ /dev/null -@@ -1,78 +0,0 @@ --package thrift -- --import ""fmt"" -- --type TStandardClient struct { -- seqId int32 -- iprot, oprot TProtocol --} -- --// TStandardClient implements TClient, and uses the standard message format for Thrift. --// It is not safe for concurrent use. 
--func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { -- return &TStandardClient{ -- iprot: inputProtocol, -- oprot: outputProtocol, -- } --} -- --func (p *TStandardClient) Send(oprot TProtocol, seqId int32, method string, args TStruct) error { -- if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil { -- return err -- } -- if err := args.Write(oprot); err != nil { -- return err -- } -- if err := oprot.WriteMessageEnd(); err != nil { -- return err -- } -- return oprot.Flush() --} -- --func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error { -- rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin() -- if err != nil { -- return err -- } -- -- if method != rMethod { -- return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf(""%s: wrong method name"", method)) -- } else if seqId != rSeqId { -- return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf(""%s: out of order sequence response"", method)) -- } else if rTypeId == EXCEPTION { -- var exception tApplicationException -- if err := exception.Read(iprot); err != nil { -- return err -- } -- -- if err := iprot.ReadMessageEnd(); err != nil { -- return err -- } -- -- return &exception -- } else if rTypeId != REPLY { -- return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf(""%s: invalid message type"", method)) -- } -- -- if err := result.Read(iprot); err != nil { -- return err -- } -- -- return iprot.ReadMessageEnd() --} -- --func (p *TStandardClient) call(method string, args, result TStruct) error { -- p.seqId++ -- seqId := p.seqId -- -- if err := p.Send(p.oprot, seqId, method, args); err != nil { -- return err -- } -- -- // method is oneway -- if result == nil { -- return nil -- } -- -- return p.Recv(p.iprot, seqId, method, result) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/client_go17.go -deleted file mode 100644 -index 15c1c52ca69d1..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/client_go17.go -+++ /dev/null -@@ -1,13 +0,0 @@ --// +build go1.7 -- --package thrift -- --import ""context"" -- --type TClient interface { -- Call(ctx context.Context, method string, args, result TStruct) error --} -- --func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { -- return p.call(method, args, result) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/client_pre_go17.go -deleted file mode 100644 -index d2e99ef2af9a6..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/client_pre_go17.go -+++ /dev/null -@@ -1,13 +0,0 @@ --// +build !go1.7 -- --package thrift -- --import ""golang.org/x/net/context"" -- --type TClient interface { -- Call(ctx context.Context, method string, args, result TStruct) error --} -- --func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { -- return p.call(method, args, result) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go -deleted file mode 100644 -index 2c729a2269e7d..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go -+++ /dev/null -@@ -1,32 +0,0 @@ --// +build go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. 
See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ""context"" -- --type mockProcessor struct { -- ProcessFunc func(in, out TProtocol) (bool, TException) --} -- --func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { -- return m.ProcessFunc(in, out) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go -deleted file mode 100644 -index e6d0c4d9afb11..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go -+++ /dev/null -@@ -1,32 +0,0 @@ --// +build !go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ""golang.org/x/net/context"" -- --type mockProcessor struct { -- ProcessFunc func(in, out TProtocol) (bool, TException) --} -- --func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { -- return m.ProcessFunc(in, out) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go -deleted file mode 100644 -index 0bc5fddeb3803..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go -+++ /dev/null -@@ -1,815 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. 
See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""io"" -- ""math"" --) -- --const ( -- COMPACT_PROTOCOL_ID = 0x082 -- COMPACT_VERSION = 1 -- COMPACT_VERSION_MASK = 0x1f -- COMPACT_TYPE_MASK = 0x0E0 -- COMPACT_TYPE_BITS = 0x07 -- COMPACT_TYPE_SHIFT_AMOUNT = 5 --) -- --type tCompactType byte -- --const ( -- COMPACT_BOOLEAN_TRUE = 0x01 -- COMPACT_BOOLEAN_FALSE = 0x02 -- COMPACT_BYTE = 0x03 -- COMPACT_I16 = 0x04 -- COMPACT_I32 = 0x05 -- COMPACT_I64 = 0x06 -- COMPACT_DOUBLE = 0x07 -- COMPACT_BINARY = 0x08 -- COMPACT_LIST = 0x09 -- COMPACT_SET = 0x0A -- COMPACT_MAP = 0x0B -- COMPACT_STRUCT = 0x0C --) -- --var ( -- ttypeToCompactType map[TType]tCompactType --) -- --func init() { -- ttypeToCompactType = map[TType]tCompactType{ -- STOP: STOP, -- BOOL: COMPACT_BOOLEAN_TRUE, -- BYTE: COMPACT_BYTE, -- I16: COMPACT_I16, -- I32: COMPACT_I32, -- I64: COMPACT_I64, -- DOUBLE: COMPACT_DOUBLE, -- STRING: COMPACT_BINARY, -- LIST: COMPACT_LIST, -- SET: COMPACT_SET, -- MAP: COMPACT_MAP, -- STRUCT: COMPACT_STRUCT, -- } --} -- --type TCompactProtocolFactory struct{} -- --func NewTCompactProtocolFactory() *TCompactProtocolFactory { -- return &TCompactProtocolFactory{} --} -- --func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { -- return NewTCompactProtocol(trans) --} -- --type TCompactProtocol struct { -- trans TRichTransport -- origTransport TTransport -- -- // Used to keep track of the last field for the current and previous structs, -- // so we can do the delta stuff. -- lastField []int -- lastFieldId int -- -- // If we encounter a boolean field begin, save the TField here so it can -- // have the value incorporated. -- booleanFieldName string -- booleanFieldId int16 -- booleanFieldPending bool -- -- // If we read a field header, and it's a boolean field, save the boolean -- // value here so that readBool can use it. -- boolValue bool -- boolValueIsNotNull bool -- buffer [64]byte --} -- --// Create a TCompactProtocol given a TTransport --func NewTCompactProtocol(trans TTransport) *TCompactProtocol { -- p := &TCompactProtocol{origTransport: trans, lastField: []int{}} -- if et, ok := trans.(TRichTransport); ok { -- p.trans = et -- } else { -- p.trans = NewTRichTransport(trans) -- } -- -- return p -- --} -- --// --// Public Writing methods. --// -- --// Write a message header to the wire. Compact Protocol messages contain the --// protocol version so we can migrate forwards in the future if need be. --func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { -- err := p.writeByteDirect(COMPACT_PROTOCOL_ID) -- if err != nil { -- return NewTProtocolException(err) -- } -- err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) -- if err != nil { -- return NewTProtocolException(err) -- } -- _, err = p.writeVarint32(seqid) -- if err != nil { -- return NewTProtocolException(err) -- } -- e := p.WriteString(name) -- return e -- --} -- --func (p *TCompactProtocol) WriteMessageEnd() error { return nil } -- --// Write a struct begin. This doesn't actually put anything on the wire. We --// use it as an opportunity to put special placeholder markers on the field --// stack so we can get the field id deltas correct. 
--func (p *TCompactProtocol) WriteStructBegin(name string) error { -- p.lastField = append(p.lastField, p.lastFieldId) -- p.lastFieldId = 0 -- return nil --} -- --// Write a struct end. This doesn't actually put anything on the wire. We use --// this as an opportunity to pop the last field from the current struct off --// of the field stack. --func (p *TCompactProtocol) WriteStructEnd() error { -- p.lastFieldId = p.lastField[len(p.lastField)-1] -- p.lastField = p.lastField[:len(p.lastField)-1] -- return nil --} -- --func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { -- if typeId == BOOL { -- // we want to possibly include the value, so we'll wait. -- p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true -- return nil -- } -- _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) -- return NewTProtocolException(err) --} -- --// The workhorse of writeFieldBegin. It has the option of doing a --// 'type override' of the type header. This is used specifically in the --// boolean field case. --func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { -- // short lastField = lastField_.pop(); -- -- // if there's a type override, use that. -- var typeToWrite byte -- if typeOverride == 0xFF { -- typeToWrite = byte(p.getCompactType(typeId)) -- } else { -- typeToWrite = typeOverride -- } -- // check if we can use delta encoding for the field id -- fieldId := int(id) -- written := 0 -- if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { -- // write them together -- err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) -- if err != nil { -- return 0, err -- } -- } else { -- // write them separate -- err := p.writeByteDirect(typeToWrite) -- if err != nil { -- return 0, err -- } -- err = p.WriteI16(id) -- written = 1 + 2 -- if err != nil { -- return 0, err -- } -- } -- -- p.lastFieldId = fieldId -- // p.lastField.Push(field.id); -- return written, nil --} -- --func (p *TCompactProtocol) WriteFieldEnd() error { return nil } -- --func (p *TCompactProtocol) WriteFieldStop() error { -- err := p.writeByteDirect(STOP) -- return NewTProtocolException(err) --} -- --func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { -- if size == 0 { -- err := p.writeByteDirect(0) -- return NewTProtocolException(err) -- } -- _, err := p.writeVarint32(int32(size)) -- if err != nil { -- return NewTProtocolException(err) -- } -- err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) -- return NewTProtocolException(err) --} -- --func (p *TCompactProtocol) WriteMapEnd() error { return nil } -- --// Write a list header. --func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error { -- _, err := p.writeCollectionBegin(elemType, size) -- return NewTProtocolException(err) --} -- --func (p *TCompactProtocol) WriteListEnd() error { return nil } -- --// Write a set header. 
--func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error { -- _, err := p.writeCollectionBegin(elemType, size) -- return NewTProtocolException(err) --} -- --func (p *TCompactProtocol) WriteSetEnd() error { return nil } -- --func (p *TCompactProtocol) WriteBool(value bool) error { -- v := byte(COMPACT_BOOLEAN_FALSE) -- if value { -- v = byte(COMPACT_BOOLEAN_TRUE) -- } -- if p.booleanFieldPending { -- // we haven't written the field header yet -- _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) -- p.booleanFieldPending = false -- return NewTProtocolException(err) -- } -- // we're not part of a field, so just write the value. -- err := p.writeByteDirect(v) -- return NewTProtocolException(err) --} -- --// Write a byte. Nothing to see here! --func (p *TCompactProtocol) WriteByte(value int8) error { -- err := p.writeByteDirect(byte(value)) -- return NewTProtocolException(err) --} -- --// Write an I16 as a zigzag varint. --func (p *TCompactProtocol) WriteI16(value int16) error { -- _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) -- return NewTProtocolException(err) --} -- --// Write an i32 as a zigzag varint. --func (p *TCompactProtocol) WriteI32(value int32) error { -- _, err := p.writeVarint32(p.int32ToZigzag(value)) -- return NewTProtocolException(err) --} -- --// Write an i64 as a zigzag varint. --func (p *TCompactProtocol) WriteI64(value int64) error { -- _, err := p.writeVarint64(p.int64ToZigzag(value)) -- return NewTProtocolException(err) --} -- --// Write a double to the wire as 8 bytes. --func (p *TCompactProtocol) WriteDouble(value float64) error { -- buf := p.buffer[0:8] -- binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) -- _, err := p.trans.Write(buf) -- return NewTProtocolException(err) --} -- --// Write a string to the wire with a varint size preceding. --func (p *TCompactProtocol) WriteString(value string) error { -- _, e := p.writeVarint32(int32(len(value))) -- if e != nil { -- return NewTProtocolException(e) -- } -- if len(value) > 0 { -- } -- _, e = p.trans.WriteString(value) -- return e --} -- --// Write a byte array, using a varint for the size. --func (p *TCompactProtocol) WriteBinary(bin []byte) error { -- _, e := p.writeVarint32(int32(len(bin))) -- if e != nil { -- return NewTProtocolException(e) -- } -- if len(bin) > 0 { -- _, e = p.trans.Write(bin) -- return NewTProtocolException(e) -- } -- return nil --} -- --// --// Reading methods. --// -- --// Read a message header. 
--func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { -- -- protocolId, err := p.readByteDirect() -- if err != nil { -- return -- } -- -- if protocolId != COMPACT_PROTOCOL_ID { -- e := fmt.Errorf(""Expected protocol id %02x but got %02x"", COMPACT_PROTOCOL_ID, protocolId) -- return """", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) -- } -- -- versionAndType, err := p.readByteDirect() -- if err != nil { -- return -- } -- -- version := versionAndType & COMPACT_VERSION_MASK -- typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) -- if version != COMPACT_VERSION { -- e := fmt.Errorf(""Expected version %02x but got %02x"", COMPACT_VERSION, version) -- err = NewTProtocolExceptionWithType(BAD_VERSION, e) -- return -- } -- seqId, e := p.readVarint32() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- name, err = p.ReadString() -- return --} -- --func (p *TCompactProtocol) ReadMessageEnd() error { return nil } -- --// Read a struct begin. There's nothing on the wire for this, but it is our --// opportunity to push a new struct begin marker onto the field stack. --func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { -- p.lastField = append(p.lastField, p.lastFieldId) -- p.lastFieldId = 0 -- return --} -- --// Doesn't actually consume any wire data, just removes the last field for --// this struct from the field stack. --func (p *TCompactProtocol) ReadStructEnd() error { -- // consume the last field we read off the wire. -- p.lastFieldId = p.lastField[len(p.lastField)-1] -- p.lastField = p.lastField[:len(p.lastField)-1] -- return nil --} -- --// Read a field header off the wire. --func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { -- t, err := p.readByteDirect() -- if err != nil { -- return -- } -- -- // if it's a stop, then we can return immediately, as the struct is over. -- if (t & 0x0f) == STOP { -- return """", STOP, 0, nil -- } -- -- // mask off the 4 MSB of the type header. it could contain a field id delta. -- modifier := int16((t & 0xf0) >> 4) -- if modifier == 0 { -- // not a delta. look ahead for the zigzag varint field id. -- id, err = p.ReadI16() -- if err != nil { -- return -- } -- } else { -- // has a delta. add the delta to the last read field id. -- id = int16(p.lastFieldId) + modifier -- } -- typeId, e := p.getTType(tCompactType(t & 0x0f)) -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- -- // if this happens to be a boolean field, the value is encoded in the type -- if p.isBoolType(t) { -- // save the boolean value in a special instance variable. -- p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) -- p.boolValueIsNotNull = true -- } -- -- // push the new field onto the field stack so we can keep the deltas going. -- p.lastFieldId = int(id) -- return --} -- --func (p *TCompactProtocol) ReadFieldEnd() error { return nil } -- --// Read a map header off the wire. If the size is zero, skip reading the key --// and value type. This means that 0-length maps will yield TMaps without the --// ""correct"" types. 
--func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { -- size32, e := p.readVarint32() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- if size32 < 0 { -- err = invalidDataLength -- return -- } -- size = int(size32) -- -- keyAndValueType := byte(STOP) -- if size != 0 { -- keyAndValueType, err = p.readByteDirect() -- if err != nil { -- return -- } -- } -- keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) -- valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) -- return --} -- --func (p *TCompactProtocol) ReadMapEnd() error { return nil } -- --// Read a list header off the wire. If the list size is 0-14, the size will --// be packed into the element type header. If it's a longer list, the 4 MSB --// of the element type header will be 0xF, and a varint will follow with the --// true size. --func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { -- size_and_type, err := p.readByteDirect() -- if err != nil { -- return -- } -- size = int((size_and_type >> 4) & 0x0f) -- if size == 15 { -- size2, e := p.readVarint32() -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- if size2 < 0 { -- err = invalidDataLength -- return -- } -- size = int(size2) -- } -- elemType, e := p.getTType(tCompactType(size_and_type)) -- if e != nil { -- err = NewTProtocolException(e) -- return -- } -- return --} -- --func (p *TCompactProtocol) ReadListEnd() error { return nil } -- --// Read a set header off the wire. If the set size is 0-14, the size will --// be packed into the element type header. If it's a longer set, the 4 MSB --// of the element type header will be 0xF, and a varint will follow with the --// true size. --func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { -- return p.ReadListBegin() --} -- --func (p *TCompactProtocol) ReadSetEnd() error { return nil } -- --// Read a boolean off the wire. If this is a boolean field, the value should --// already have been read during readFieldBegin, so we'll just consume the --// pre-stored value. Otherwise, read a byte. --func (p *TCompactProtocol) ReadBool() (value bool, err error) { -- if p.boolValueIsNotNull { -- p.boolValueIsNotNull = false -- return p.boolValue, nil -- } -- v, err := p.readByteDirect() -- return v == COMPACT_BOOLEAN_TRUE, err --} -- --// Read a single byte off the wire. Nothing interesting here. --func (p *TCompactProtocol) ReadByte() (int8, error) { -- v, err := p.readByteDirect() -- if err != nil { -- return 0, NewTProtocolException(err) -- } -- return int8(v), err --} -- --// Read an i16 from the wire as a zigzag varint. --func (p *TCompactProtocol) ReadI16() (value int16, err error) { -- v, err := p.ReadI32() -- return int16(v), err --} -- --// Read an i32 from the wire as a zigzag varint. --func (p *TCompactProtocol) ReadI32() (value int32, err error) { -- v, e := p.readVarint32() -- if e != nil { -- return 0, NewTProtocolException(e) -- } -- value = p.zigzagToInt32(v) -- return value, nil --} -- --// Read an i64 from the wire as a zigzag varint. --func (p *TCompactProtocol) ReadI64() (value int64, err error) { -- v, e := p.readVarint64() -- if e != nil { -- return 0, NewTProtocolException(e) -- } -- value = p.zigzagToInt64(v) -- return value, nil --} -- --// No magic here - just read a double off the wire. 
--func (p *TCompactProtocol) ReadDouble() (value float64, err error) { -- longBits := p.buffer[0:8] -- _, e := io.ReadFull(p.trans, longBits) -- if e != nil { -- return 0.0, NewTProtocolException(e) -- } -- return math.Float64frombits(p.bytesToUint64(longBits)), nil --} -- --// Reads a []byte (via readBinary), and then UTF-8 decodes it. --func (p *TCompactProtocol) ReadString() (value string, err error) { -- length, e := p.readVarint32() -- if e != nil { -- return """", NewTProtocolException(e) -- } -- if length < 0 { -- return """", invalidDataLength -- } -- if uint64(length) > p.trans.RemainingBytes() { -- return """", invalidDataLength -- } -- -- if length == 0 { -- return """", nil -- } -- var buf []byte -- if length <= int32(len(p.buffer)) { -- buf = p.buffer[0:length] -- } else { -- buf = make([]byte, length) -- } -- _, e = io.ReadFull(p.trans, buf) -- return string(buf), NewTProtocolException(e) --} -- --// Read a []byte from the wire. --func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { -- length, e := p.readVarint32() -- if e != nil { -- return nil, NewTProtocolException(e) -- } -- if length == 0 { -- return []byte{}, nil -- } -- if length < 0 { -- return nil, invalidDataLength -- } -- if uint64(length) > p.trans.RemainingBytes() { -- return nil, invalidDataLength -- } -- -- buf := make([]byte, length) -- _, e = io.ReadFull(p.trans, buf) -- return buf, NewTProtocolException(e) --} -- --func (p *TCompactProtocol) Flush() (err error) { -- return NewTProtocolException(p.trans.Flush()) --} -- --func (p *TCompactProtocol) Skip(fieldType TType) (err error) { -- return SkipDefaultDepth(p, fieldType) --} -- --func (p *TCompactProtocol) Transport() TTransport { -- return p.origTransport --} -- --// --// Internal writing methods --// -- --// Abstract method for writing the start of lists and sets. List and sets on --// the wire differ only by the type indicator. --func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { -- if size <= 14 { -- return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) -- } -- err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) -- if err != nil { -- return 0, err -- } -- m, err := p.writeVarint32(int32(size)) -- return 1 + m, err --} -- --// Write an i32 as a varint. Results in 1-5 bytes on the wire. --// TODO(pomack): make a permanent buffer like writeVarint64? --func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { -- i32buf := p.buffer[0:5] -- idx := 0 -- for { -- if (n & ^0x7F) == 0 { -- i32buf[idx] = byte(n) -- idx++ -- // p.writeByteDirect(byte(n)); -- break -- // return; -- } else { -- i32buf[idx] = byte((n & 0x7F) | 0x80) -- idx++ -- // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); -- u := uint32(n) -- n = int32(u >> 7) -- } -- } -- return p.trans.Write(i32buf[0:idx]) --} -- --// Write an i64 as a varint. Results in 1-10 bytes on the wire. --func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { -- varint64out := p.buffer[0:10] -- idx := 0 -- for { -- if (n & ^0x7F) == 0 { -- varint64out[idx] = byte(n) -- idx++ -- break -- } else { -- varint64out[idx] = byte((n & 0x7F) | 0x80) -- idx++ -- u := uint64(n) -- n = int64(u >> 7) -- } -- } -- return p.trans.Write(varint64out[0:idx]) --} -- --// Convert l into a zigzag long. This allows negative numbers to be --// represented compactly as a varint. --func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { -- return (l << 1) ^ (l >> 63) --} -- --// Convert l into a zigzag long. 
This allows negative numbers to be --// represented compactly as a varint. --func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { -- return (n << 1) ^ (n >> 31) --} -- --func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { -- binary.LittleEndian.PutUint64(buf, n) --} -- --func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { -- binary.LittleEndian.PutUint64(buf, uint64(n)) --} -- --// Writes a byte without any possibility of all that field header nonsense. --// Used internally by other writing methods that know they need to write a byte. --func (p *TCompactProtocol) writeByteDirect(b byte) error { -- return p.trans.WriteByte(b) --} -- --// Writes a byte without any possibility of all that field header nonsense. --func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { -- return 1, p.writeByteDirect(byte(n)) --} -- --// --// Internal reading methods --// -- --// Read an i32 from the wire as a varint. The MSB of each byte is set --// if there is another byte to follow. This can read up to 5 bytes. --func (p *TCompactProtocol) readVarint32() (int32, error) { -- // if the wire contains the right stuff, this will just truncate the i64 we -- // read and get us the right sign. -- v, err := p.readVarint64() -- return int32(v), err --} -- --// Read an i64 from the wire as a proper varint. The MSB of each byte is set --// if there is another byte to follow. This can read up to 10 bytes. --func (p *TCompactProtocol) readVarint64() (int64, error) { -- shift := uint(0) -- result := int64(0) -- for { -- b, err := p.readByteDirect() -- if err != nil { -- return 0, err -- } -- result |= int64(b&0x7f) << shift -- if (b & 0x80) != 0x80 { -- break -- } -- shift += 7 -- } -- return result, nil --} -- --// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. --func (p *TCompactProtocol) readByteDirect() (byte, error) { -- return p.trans.ReadByte() --} -- --// --// encoding helpers --// -- --// Convert from zigzag int to int. --func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { -- u := uint32(n) -- return int32(u>>1) ^ -(n & 1) --} -- --// Convert from zigzag long to long. --func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { -- u := uint64(n) -- return int64(u>>1) ^ -(n & 1) --} -- --// Note that it's important that the mask bytes are long literals, --// otherwise they'll default to ints, and when you shift an int left 56 bits, --// you just get a messed up int. --func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { -- return int64(binary.LittleEndian.Uint64(b)) --} -- --// Note that it's important that the mask bytes are long literals, --// otherwise they'll default to ints, and when you shift an int left 56 bits, --// you just get a messed up int. --func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { -- return binary.LittleEndian.Uint64(b) --} -- --// --// type testing and converting --// -- --func (p *TCompactProtocol) isBoolType(b byte) bool { -- return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE --} -- --// Given a tCompactType constant, convert it to its corresponding --// TType value. 
--func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { -- switch byte(t) & 0x0f { -- case STOP: -- return STOP, nil -- case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: -- return BOOL, nil -- case COMPACT_BYTE: -- return BYTE, nil -- case COMPACT_I16: -- return I16, nil -- case COMPACT_I32: -- return I32, nil -- case COMPACT_I64: -- return I64, nil -- case COMPACT_DOUBLE: -- return DOUBLE, nil -- case COMPACT_BINARY: -- return STRING, nil -- case COMPACT_LIST: -- return LIST, nil -- case COMPACT_SET: -- return SET, nil -- case COMPACT_MAP: -- return MAP, nil -- case COMPACT_STRUCT: -- return STRUCT, nil -- } -- return STOP, TException(fmt.Errorf(""don't know what type: %s"", t&0x0f)) --} -- --// Given a TType value, find the appropriate TCompactProtocol.Types constant. --func (p *TCompactProtocol) getCompactType(t TType) tCompactType { -- return ttypeToCompactType[t] --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go -deleted file mode 100644 -index d37252cc61d4b..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go -+++ /dev/null -@@ -1,269 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. 
-- */ -- --package thrift -- --import ( -- ""log"" --) -- --type TDebugProtocol struct { -- Delegate TProtocol -- LogPrefix string --} -- --type TDebugProtocolFactory struct { -- Underlying TProtocolFactory -- LogPrefix string --} -- --func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { -- return &TDebugProtocolFactory{ -- Underlying: underlying, -- LogPrefix: logPrefix, -- } --} -- --func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { -- return &TDebugProtocol{ -- Delegate: t.Underlying.GetProtocol(trans), -- LogPrefix: t.LogPrefix, -- } --} -- --func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { -- err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid) -- log.Printf(""%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v"", tdp.LogPrefix, name, typeId, seqid, err) -- return err --} --func (tdp *TDebugProtocol) WriteMessageEnd() error { -- err := tdp.Delegate.WriteMessageEnd() -- log.Printf(""%sWriteMessageEnd() => %#v"", tdp.LogPrefix, err) -- return err --} --func (tdp *TDebugProtocol) WriteStructBegin(name string) error { -- err := tdp.Delegate.WriteStructBegin(name) -- log.Printf(""%sWriteStructBegin(name=%#v) => %#v"", tdp.LogPrefix, name, err) -- return err --} --func (tdp *TDebugProtocol) WriteStructEnd() error { -- err := tdp.Delegate.WriteStructEnd() -- log.Printf(""%sWriteStructEnd() => %#v"", tdp.LogPrefix, err) -- return err --} --func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { -- err := tdp.Delegate.WriteFieldBegin(name, typeId, id) -- log.Printf(""%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v"", tdp.LogPrefix, name, typeId, id, err) -- return err --} --func (tdp *TDebugProtocol) WriteFieldEnd() error { -- err := tdp.Delegate.WriteFieldEnd() -- log.Printf(""%sWriteFieldEnd() => %#v"", tdp.LogPrefix, err) -- return err --} --func (tdp *TDebugProtocol) WriteFieldStop() error { -- err := tdp.Delegate.WriteFieldStop() -- log.Printf(""%sWriteFieldStop() => %#v"", tdp.LogPrefix, err) -- return err --} --func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { -- err := tdp.Delegate.WriteMapBegin(keyType, valueType, size) -- log.Printf(""%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v"", tdp.LogPrefix, keyType, valueType, size, err) -- return err --} --func (tdp *TDebugProtocol) WriteMapEnd() error { -- err := tdp.Delegate.WriteMapEnd() -- log.Printf(""%sWriteMapEnd() => %#v"", tdp.LogPrefix, err) -- return err --} --func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error { -- err := tdp.Delegate.WriteListBegin(elemType, size) -- log.Printf(""%sWriteListBegin(elemType=%#v, size=%#v) => %#v"", tdp.LogPrefix, elemType, size, err) -- return err --} --func (tdp *TDebugProtocol) WriteListEnd() error { -- err := tdp.Delegate.WriteListEnd() -- log.Printf(""%sWriteListEnd() => %#v"", tdp.LogPrefix, err) -- return err --} --func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error { -- err := tdp.Delegate.WriteSetBegin(elemType, size) -- log.Printf(""%sWriteSetBegin(elemType=%#v, size=%#v) => %#v"", tdp.LogPrefix, elemType, size, err) -- return err --} --func (tdp *TDebugProtocol) WriteSetEnd() error { -- err := tdp.Delegate.WriteSetEnd() -- log.Printf(""%sWriteSetEnd() => %#v"", tdp.LogPrefix, err) -- return err --} --func (tdp *TDebugProtocol) WriteBool(value bool) error { -- err := tdp.Delegate.WriteBool(value) -- 
log.Printf(""%sWriteBool(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} --func (tdp *TDebugProtocol) WriteByte(value int8) error { -- err := tdp.Delegate.WriteByte(value) -- log.Printf(""%sWriteByte(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} --func (tdp *TDebugProtocol) WriteI16(value int16) error { -- err := tdp.Delegate.WriteI16(value) -- log.Printf(""%sWriteI16(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} --func (tdp *TDebugProtocol) WriteI32(value int32) error { -- err := tdp.Delegate.WriteI32(value) -- log.Printf(""%sWriteI32(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} --func (tdp *TDebugProtocol) WriteI64(value int64) error { -- err := tdp.Delegate.WriteI64(value) -- log.Printf(""%sWriteI64(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} --func (tdp *TDebugProtocol) WriteDouble(value float64) error { -- err := tdp.Delegate.WriteDouble(value) -- log.Printf(""%sWriteDouble(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} --func (tdp *TDebugProtocol) WriteString(value string) error { -- err := tdp.Delegate.WriteString(value) -- log.Printf(""%sWriteString(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} --func (tdp *TDebugProtocol) WriteBinary(value []byte) error { -- err := tdp.Delegate.WriteBinary(value) -- log.Printf(""%sWriteBinary(value=%#v) => %#v"", tdp.LogPrefix, value, err) -- return err --} -- --func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { -- name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin() -- log.Printf(""%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)"", tdp.LogPrefix, name, typeId, seqid, err) -- return --} --func (tdp *TDebugProtocol) ReadMessageEnd() (err error) { -- err = tdp.Delegate.ReadMessageEnd() -- log.Printf(""%sReadMessageEnd() err=%#v"", tdp.LogPrefix, err) -- return --} --func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) { -- name, err = tdp.Delegate.ReadStructBegin() -- log.Printf(""%sReadStructBegin() (name%#v, err=%#v)"", tdp.LogPrefix, name, err) -- return --} --func (tdp *TDebugProtocol) ReadStructEnd() (err error) { -- err = tdp.Delegate.ReadStructEnd() -- log.Printf(""%sReadStructEnd() err=%#v"", tdp.LogPrefix, err) -- return --} --func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { -- name, typeId, id, err = tdp.Delegate.ReadFieldBegin() -- log.Printf(""%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)"", tdp.LogPrefix, name, typeId, id, err) -- return --} --func (tdp *TDebugProtocol) ReadFieldEnd() (err error) { -- err = tdp.Delegate.ReadFieldEnd() -- log.Printf(""%sReadFieldEnd() err=%#v"", tdp.LogPrefix, err) -- return --} --func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { -- keyType, valueType, size, err = tdp.Delegate.ReadMapBegin() -- log.Printf(""%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)"", tdp.LogPrefix, keyType, valueType, size, err) -- return --} --func (tdp *TDebugProtocol) ReadMapEnd() (err error) { -- err = tdp.Delegate.ReadMapEnd() -- log.Printf(""%sReadMapEnd() err=%#v"", tdp.LogPrefix, err) -- return --} --func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) { -- elemType, size, err = tdp.Delegate.ReadListBegin() -- log.Printf(""%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)"", tdp.LogPrefix, elemType, size, err) -- return --} --func (tdp 
*TDebugProtocol) ReadListEnd() (err error) { -- err = tdp.Delegate.ReadListEnd() -- log.Printf(""%sReadListEnd() err=%#v"", tdp.LogPrefix, err) -- return --} --func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) { -- elemType, size, err = tdp.Delegate.ReadSetBegin() -- log.Printf(""%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)"", tdp.LogPrefix, elemType, size, err) -- return --} --func (tdp *TDebugProtocol) ReadSetEnd() (err error) { -- err = tdp.Delegate.ReadSetEnd() -- log.Printf(""%sReadSetEnd() err=%#v"", tdp.LogPrefix, err) -- return --} --func (tdp *TDebugProtocol) ReadBool() (value bool, err error) { -- value, err = tdp.Delegate.ReadBool() -- log.Printf(""%sReadBool() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) ReadByte() (value int8, err error) { -- value, err = tdp.Delegate.ReadByte() -- log.Printf(""%sReadByte() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) ReadI16() (value int16, err error) { -- value, err = tdp.Delegate.ReadI16() -- log.Printf(""%sReadI16() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) ReadI32() (value int32, err error) { -- value, err = tdp.Delegate.ReadI32() -- log.Printf(""%sReadI32() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) ReadI64() (value int64, err error) { -- value, err = tdp.Delegate.ReadI64() -- log.Printf(""%sReadI64() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) { -- value, err = tdp.Delegate.ReadDouble() -- log.Printf(""%sReadDouble() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) ReadString() (value string, err error) { -- value, err = tdp.Delegate.ReadString() -- log.Printf(""%sReadString() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) { -- value, err = tdp.Delegate.ReadBinary() -- log.Printf(""%sReadBinary() (value=%#v, err=%#v)"", tdp.LogPrefix, value, err) -- return --} --func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) { -- err = tdp.Delegate.Skip(fieldType) -- log.Printf(""%sSkip(fieldType=%#v) (err=%#v)"", tdp.LogPrefix, fieldType, err) -- return --} --func (tdp *TDebugProtocol) Flush() (err error) { -- err = tdp.Delegate.Flush() -- log.Printf(""%sFlush() (err=%#v)"", tdp.LogPrefix, err) -- return --} -- --func (tdp *TDebugProtocol) Transport() TTransport { -- return tdp.Delegate.Transport() --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go -deleted file mode 100644 -index 91a0983a4ad05..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go -+++ /dev/null -@@ -1,58 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. 
You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --type TDeserializer struct { -- Transport TTransport -- Protocol TProtocol --} -- --func NewTDeserializer() *TDeserializer { -- var transport TTransport -- transport = NewTMemoryBufferLen(1024) -- -- protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) -- -- return &TDeserializer{ -- transport, -- protocol} --} -- --func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) { -- err = nil -- if _, err = t.Transport.Write([]byte(s)); err != nil { -- return -- } -- if err = msg.Read(t.Protocol); err != nil { -- return -- } -- return --} -- --func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) { -- err = nil -- if _, err = t.Transport.Write(b); err != nil { -- return -- } -- if err = msg.Read(t.Protocol); err != nil { -- return -- } -- return --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go -deleted file mode 100644 -index ea8d6f66114ce..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go -+++ /dev/null -@@ -1,44 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""errors"" --) -- --// Generic Thrift exception --type TException interface { -- error --} -- --// Prepends additional information to an error without losing the Thrift exception interface --func PrependError(prepend string, err error) error { -- if t, ok := err.(TTransportException); ok { -- return NewTTransportException(t.TypeId(), prepend+t.Error()) -- } -- if t, ok := err.(TProtocolException); ok { -- return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) -- } -- if t, ok := err.(TApplicationException); ok { -- return NewTApplicationException(t.TypeId(), prepend+t.Error()) -- } -- -- return errors.New(prepend + err.Error()) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/field.go b/vendor/github.com/apache/thrift/lib/go/thrift/field.go -deleted file mode 100644 -index 9d66525509776..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/field.go -+++ /dev/null -@@ -1,79 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. 
See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --// Helper class that encapsulates field metadata. --type field struct { -- name string -- typeId TType -- id int --} -- --func newField(n string, t TType, i int) *field { -- return &field{name: n, typeId: t, id: i} --} -- --func (p *field) Name() string { -- if p == nil { -- return """" -- } -- return p.name --} -- --func (p *field) TypeId() TType { -- if p == nil { -- return TType(VOID) -- } -- return p.typeId --} -- --func (p *field) Id() int { -- if p == nil { -- return -1 -- } -- return p.id --} -- --func (p *field) String() string { -- if p == nil { -- return """" -- } -- return """" --} -- --var ANONYMOUS_FIELD *field -- --type fieldSlice []field -- --func (p fieldSlice) Len() int { -- return len(p) --} -- --func (p fieldSlice) Less(i, j int) bool { -- return p[i].Id() < p[j].Id() --} -- --func (p fieldSlice) Swap(i, j int) { -- p[i], p[j] = p[j], p[i] --} -- --func init() { -- ANONYMOUS_FIELD = newField("""", STOP, 0) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go -deleted file mode 100644 -index 60b1249915a45..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go -+++ /dev/null -@@ -1,172 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""bufio"" -- ""bytes"" -- ""encoding/binary"" -- ""fmt"" -- ""io"" --) -- --const DEFAULT_MAX_LENGTH = 16384000 -- --type TFramedTransport struct { -- transport TTransport -- buf bytes.Buffer -- reader *bufio.Reader -- frameSize uint32 //Current remaining size of the frame. 
if ==0 read next frame header -- buffer [4]byte -- maxLength uint32 --} -- --type tFramedTransportFactory struct { -- factory TTransportFactory -- maxLength uint32 --} -- --func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { -- return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH} --} -- --func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { -- return &tFramedTransportFactory{factory: factory, maxLength: maxLength} --} -- --func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { -- tt, err := p.factory.GetTransport(base) -- if err != nil { -- return nil, err -- } -- return NewTFramedTransportMaxLength(tt, p.maxLength), nil --} -- --func NewTFramedTransport(transport TTransport) *TFramedTransport { -- return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH} --} -- --func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { -- return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength} --} -- --func (p *TFramedTransport) Open() error { -- return p.transport.Open() --} -- --func (p *TFramedTransport) IsOpen() bool { -- return p.transport.IsOpen() --} -- --func (p *TFramedTransport) Close() error { -- return p.transport.Close() --} -- --func (p *TFramedTransport) Read(buf []byte) (l int, err error) { -- if p.frameSize == 0 { -- p.frameSize, err = p.readFrameHeader() -- if err != nil { -- return -- } -- } -- if p.frameSize < uint32(len(buf)) { -- frameSize := p.frameSize -- tmp := make([]byte, p.frameSize) -- l, err = p.Read(tmp) -- copy(buf, tmp) -- if err == nil { -- err = NewTTransportExceptionFromError(fmt.Errorf(""Not enough frame size %d to read %d bytes"", frameSize, len(buf))) -- return -- } -- } -- got, err := p.reader.Read(buf) -- p.frameSize = p.frameSize - uint32(got) -- //sanity check -- if p.frameSize < 0 { -- return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, ""Negative frame size"") -- } -- return got, NewTTransportExceptionFromError(err) --} -- --func (p *TFramedTransport) ReadByte() (c byte, err error) { -- if p.frameSize == 0 { -- p.frameSize, err = p.readFrameHeader() -- if err != nil { -- return -- } -- } -- if p.frameSize < 1 { -- return 0, NewTTransportExceptionFromError(fmt.Errorf(""Not enough frame size %d to read %d bytes"", p.frameSize, 1)) -- } -- c, err = p.reader.ReadByte() -- if err == nil { -- p.frameSize-- -- } -- return --} -- --func (p *TFramedTransport) Write(buf []byte) (int, error) { -- n, err := p.buf.Write(buf) -- return n, NewTTransportExceptionFromError(err) --} -- --func (p *TFramedTransport) WriteByte(c byte) error { -- return p.buf.WriteByte(c) --} -- --func (p *TFramedTransport) WriteString(s string) (n int, err error) { -- return p.buf.WriteString(s) --} -- --func (p *TFramedTransport) Flush() error { -- size := p.buf.Len() -- buf := p.buffer[:4] -- binary.BigEndian.PutUint32(buf, uint32(size)) -- _, err := p.transport.Write(buf) -- if err != nil { -- p.buf.Truncate(0) -- return NewTTransportExceptionFromError(err) -- } -- if size > 0 { -- if n, err := p.buf.WriteTo(p.transport); err != nil { -- print(""Error while flushing write buffer of size "", size, "" to transport, only wrote "", n, "" bytes: "", err.Error(), ""\n"") -- p.buf.Truncate(0) -- return NewTTransportExceptionFromError(err) -- } -- } -- err = p.transport.Flush() -- return 
NewTTransportExceptionFromError(err) --} -- --func (p *TFramedTransport) readFrameHeader() (uint32, error) { -- buf := p.buffer[:4] -- if _, err := io.ReadFull(p.reader, buf); err != nil { -- return 0, err -- } -- size := binary.BigEndian.Uint32(buf) -- if size < 0 || size > p.maxLength { -- return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf(""Incorrect frame size (%d)"", size)) -- } -- return size, nil --} -- --func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { -- return uint64(p.frameSize) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/go17.go -deleted file mode 100644 -index e3b21c4b73146..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/go17.go -+++ /dev/null -@@ -1,26 +0,0 @@ --// +build go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ""context"" -- --var defaultCtx = context.Background() -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go -deleted file mode 100644 -index 33f2aa4b594e1..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go -+++ /dev/null -@@ -1,238 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""bytes"" -- ""io"" -- ""io/ioutil"" -- ""net/http"" -- ""net/url"" -- ""strconv"" --) -- --// Default to using the shared http client. Library users are --// free to change this global client or specify one through --// THttpClientOptions. 
--var DefaultHttpClient *http.Client = http.DefaultClient -- --type THttpClient struct { -- client *http.Client -- response *http.Response -- url *url.URL -- requestBuffer *bytes.Buffer -- header http.Header -- nsecConnectTimeout int64 -- nsecReadTimeout int64 --} -- --type THttpClientTransportFactory struct { -- options THttpClientOptions -- url string --} -- --func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { -- if trans != nil { -- t, ok := trans.(*THttpClient) -- if ok && t.url != nil { -- return NewTHttpClientWithOptions(t.url.String(), p.options) -- } -- } -- return NewTHttpClientWithOptions(p.url, p.options) --} -- --type THttpClientOptions struct { -- // If nil, DefaultHttpClient is used -- Client *http.Client --} -- --func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { -- return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) --} -- --func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { -- return &THttpClientTransportFactory{url: url, options: options} --} -- --func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { -- parsedURL, err := url.Parse(urlstr) -- if err != nil { -- return nil, err -- } -- buf := make([]byte, 0, 1024) -- client := options.Client -- if client == nil { -- client = DefaultHttpClient -- } -- httpHeader := map[string][]string{""Content-Type"": {""application/x-thrift""}} -- return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil --} -- --func NewTHttpClient(urlstr string) (TTransport, error) { -- return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) --} -- --// Set the HTTP Header for this specific Thrift Transport --// It is important that you first assert the TTransport as a THttpClient type --// like so: --// --// httpTrans := trans.(THttpClient) --// httpTrans.SetHeader(""User-Agent"",""Thrift Client 1.0"") --func (p *THttpClient) SetHeader(key string, value string) { -- p.header.Add(key, value) --} -- --// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport --// It is important that you first assert the TTransport as a THttpClient type --// like so: --// --// httpTrans := trans.(THttpClient) --// hdrValue := httpTrans.GetHeader(""User-Agent"") --func (p *THttpClient) GetHeader(key string) string { -- return p.header.Get(key) --} -- --// Deletes the HTTP Header given a Header Key for this specific Thrift Transport --// It is important that you first assert the TTransport as a THttpClient type --// like so: --// --// httpTrans := trans.(THttpClient) --// httpTrans.DelHeader(""User-Agent"") --func (p *THttpClient) DelHeader(key string) { -- p.header.Del(key) --} -- --func (p *THttpClient) Open() error { -- // do nothing -- return nil --} -- --func (p *THttpClient) IsOpen() bool { -- return p.response != nil || p.requestBuffer != nil --} -- --func (p *THttpClient) closeResponse() error { -- var err error -- if p.response != nil && p.response.Body != nil { -- // The docs specify that if keepalive is enabled and the response body is not -- // read to completion the connection will never be returned to the pool and -- // reused. Errors are being ignored here because if the connection is invalid -- // and this fails for some reason, the Close() method will do any remaining -- // cleanup. 
-- io.Copy(ioutil.Discard, p.response.Body) -- -- err = p.response.Body.Close() -- } -- -- p.response = nil -- return err --} -- --func (p *THttpClient) Close() error { -- if p.requestBuffer != nil { -- p.requestBuffer.Reset() -- p.requestBuffer = nil -- } -- return p.closeResponse() --} -- --func (p *THttpClient) Read(buf []byte) (int, error) { -- if p.response == nil { -- return 0, NewTTransportException(NOT_OPEN, ""Response buffer is empty, no request."") -- } -- n, err := p.response.Body.Read(buf) -- if n > 0 && (err == nil || err == io.EOF) { -- return n, nil -- } -- return n, NewTTransportExceptionFromError(err) --} -- --func (p *THttpClient) ReadByte() (c byte, err error) { -- return readByte(p.response.Body) --} -- --func (p *THttpClient) Write(buf []byte) (int, error) { -- n, err := p.requestBuffer.Write(buf) -- return n, err --} -- --func (p *THttpClient) WriteByte(c byte) error { -- return p.requestBuffer.WriteByte(c) --} -- --func (p *THttpClient) WriteString(s string) (n int, err error) { -- return p.requestBuffer.WriteString(s) --} -- --func (p *THttpClient) Flush() error { -- // Close any previous response body to avoid leaking connections. -- p.closeResponse() -- -- req, err := http.NewRequest(""POST"", p.url.String(), p.requestBuffer) -- if err != nil { -- return NewTTransportExceptionFromError(err) -- } -- req.Header = p.header -- response, err := p.client.Do(req) -- if err != nil { -- return NewTTransportExceptionFromError(err) -- } -- if response.StatusCode != http.StatusOK { -- // Close the response to avoid leaking file descriptors. closeResponse does -- // more than just call Close(), so temporarily assign it and reuse the logic. -- p.response = response -- p.closeResponse() -- -- // TODO(pomack) log bad response -- return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, ""HTTP Response code: ""+strconv.Itoa(response.StatusCode)) -- } -- p.response = response -- return nil --} -- --func (p *THttpClient) RemainingBytes() (num_bytes uint64) { -- len := p.response.ContentLength -- if len >= 0 { -- return uint64(len) -- } -- -- const maxSize = ^uint64(0) -- return maxSize // the thruth is, we just don't know unless framed is used --} -- --// Deprecated: Use NewTHttpClientTransportFactory instead. --func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { -- return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) --} -- --// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. --func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { -- return NewTHttpClientTransportFactoryWithOptions(url, options) --} -- --// Deprecated: Use NewTHttpClientWithOptions instead. --func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { -- return NewTHttpClientWithOptions(urlstr, options) --} -- --// Deprecated: Use NewTHttpClient instead. --func NewTHttpPostClient(urlstr string) (TTransport, error) { -- return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go -deleted file mode 100644 -index 601855b926ce0..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go -+++ /dev/null -@@ -1,51 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. 
See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""compress/gzip"" -- ""io"" -- ""net/http"" -- ""strings"" --) -- --// gz transparently compresses the HTTP response if the client supports it. --func gz(handler http.HandlerFunc) http.HandlerFunc { -- return func(w http.ResponseWriter, r *http.Request) { -- if !strings.Contains(r.Header.Get(""Accept-Encoding""), ""gzip"") { -- handler(w, r) -- return -- } -- w.Header().Set(""Content-Encoding"", ""gzip"") -- gz := gzip.NewWriter(w) -- defer gz.Close() -- gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} -- handler(gzw, r) -- } --} -- --type gzipResponseWriter struct { -- io.Writer -- http.ResponseWriter --} -- --func (w gzipResponseWriter) Write(b []byte) (int, error) { -- return w.Writer.Write(b) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go -deleted file mode 100644 -index 1313ac225baad..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go -+++ /dev/null -@@ -1,38 +0,0 @@ --// +build go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. 
-- */ -- --package thrift -- --import ( -- ""net/http"" --) -- --// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function --func NewThriftHandlerFunc(processor TProcessor, -- inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { -- -- return gz(func(w http.ResponseWriter, r *http.Request) { -- w.Header().Add(""Content-Type"", ""application/x-thrift"") -- -- transport := NewStreamTransport(r.Body, w) -- processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) -- }) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go -deleted file mode 100644 -index 13aa1c11d1a56..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go -+++ /dev/null -@@ -1,40 +0,0 @@ --// +build !go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""net/http"" -- -- ""golang.org/x/net/context"" --) -- --// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function --func NewThriftHandlerFunc(processor TProcessor, -- inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { -- -- return gz(func(w http.ResponseWriter, r *http.Request) { -- w.Header().Add(""Content-Type"", ""application/x-thrift"") -- -- transport := NewStreamTransport(r.Body, w) -- processor.Process(context.Background(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) -- }) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go -deleted file mode 100644 -index b18be81c46fec..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go -+++ /dev/null -@@ -1,213 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. 
See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""bufio"" -- ""io"" --) -- --// StreamTransport is a Transport made of an io.Reader and/or an io.Writer --type StreamTransport struct { -- io.Reader -- io.Writer -- isReadWriter bool -- closed bool --} -- --type StreamTransportFactory struct { -- Reader io.Reader -- Writer io.Writer -- isReadWriter bool --} -- --func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { -- if trans != nil { -- t, ok := trans.(*StreamTransport) -- if ok { -- if t.isReadWriter { -- return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil -- } -- if t.Reader != nil && t.Writer != nil { -- return NewStreamTransport(t.Reader, t.Writer), nil -- } -- if t.Reader != nil && t.Writer == nil { -- return NewStreamTransportR(t.Reader), nil -- } -- if t.Reader == nil && t.Writer != nil { -- return NewStreamTransportW(t.Writer), nil -- } -- return &StreamTransport{}, nil -- } -- } -- if p.isReadWriter { -- return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil -- } -- if p.Reader != nil && p.Writer != nil { -- return NewStreamTransport(p.Reader, p.Writer), nil -- } -- if p.Reader != nil && p.Writer == nil { -- return NewStreamTransportR(p.Reader), nil -- } -- if p.Reader == nil && p.Writer != nil { -- return NewStreamTransportW(p.Writer), nil -- } -- return &StreamTransport{}, nil --} -- --func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { -- return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} --} -- --func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { -- return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} --} -- --func NewStreamTransportR(r io.Reader) *StreamTransport { -- return &StreamTransport{Reader: bufio.NewReader(r)} --} -- --func NewStreamTransportW(w io.Writer) *StreamTransport { -- return &StreamTransport{Writer: bufio.NewWriter(w)} --} -- --func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { -- bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) -- return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} --} -- --func (p *StreamTransport) IsOpen() bool { -- return !p.closed --} -- --// implicitly opened on creation, can't be reopened once closed --func (p *StreamTransport) Open() error { -- if !p.closed { -- return NewTTransportException(ALREADY_OPEN, ""StreamTransport already open."") -- } else { -- return NewTTransportException(NOT_OPEN, ""cannot reopen StreamTransport."") -- } --} -- --// Closes both the input and output streams. --func (p *StreamTransport) Close() error { -- if p.closed { -- return NewTTransportException(NOT_OPEN, ""StreamTransport already closed."") -- } -- p.closed = true -- closedReader := false -- if p.Reader != nil { -- c, ok := p.Reader.(io.Closer) -- if ok { -- e := c.Close() -- closedReader = true -- if e != nil { -- return e -- } -- } -- p.Reader = nil -- } -- if p.Writer != nil && (!closedReader || !p.isReadWriter) { -- c, ok := p.Writer.(io.Closer) -- if ok { -- e := c.Close() -- if e != nil { -- return e -- } -- } -- p.Writer = nil -- } -- return nil --} -- --// Flushes the underlying output stream if not null. 
--func (p *StreamTransport) Flush() error { -- if p.Writer == nil { -- return NewTTransportException(NOT_OPEN, ""Cannot flush null outputStream"") -- } -- f, ok := p.Writer.(Flusher) -- if ok { -- err := f.Flush() -- if err != nil { -- return NewTTransportExceptionFromError(err) -- } -- } -- return nil --} -- --func (p *StreamTransport) Read(c []byte) (n int, err error) { -- n, err = p.Reader.Read(c) -- if err != nil { -- err = NewTTransportExceptionFromError(err) -- } -- return --} -- --func (p *StreamTransport) ReadByte() (c byte, err error) { -- f, ok := p.Reader.(io.ByteReader) -- if ok { -- c, err = f.ReadByte() -- } else { -- c, err = readByte(p.Reader) -- } -- if err != nil { -- err = NewTTransportExceptionFromError(err) -- } -- return --} -- --func (p *StreamTransport) Write(c []byte) (n int, err error) { -- n, err = p.Writer.Write(c) -- if err != nil { -- err = NewTTransportExceptionFromError(err) -- } -- return --} -- --func (p *StreamTransport) WriteByte(c byte) (err error) { -- f, ok := p.Writer.(io.ByteWriter) -- if ok { -- err = f.WriteByte(c) -- } else { -- err = writeByte(p.Writer, c) -- } -- if err != nil { -- err = NewTTransportExceptionFromError(err) -- } -- return --} -- --func (p *StreamTransport) WriteString(s string) (n int, err error) { -- f, ok := p.Writer.(stringWriter) -- if ok { -- n, err = f.WriteString(s) -- } else { -- n, err = p.Writer.Write([]byte(s)) -- } -- if err != nil { -- err = NewTTransportExceptionFromError(err) -- } -- return --} -- --func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { -- const maxSize = ^uint64(0) -- return maxSize // the thruth is, we just don't know unless framed is used --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go -deleted file mode 100644 -index 442fa9144d47f..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go -+++ /dev/null -@@ -1,583 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""encoding/base64"" -- ""fmt"" --) -- --const ( -- THRIFT_JSON_PROTOCOL_VERSION = 1 --) -- --// for references to _ParseContext see tsimplejson_protocol.go -- --// JSON protocol implementation for thrift. --// --// This protocol produces/consumes a simple output format --// suitable for parsing by scripting languages. It should not be --// confused with the full-featured TJSONProtocol. 
--// --type TJSONProtocol struct { -- *TSimpleJSONProtocol --} -- --// Constructor --func NewTJSONProtocol(t TTransport) *TJSONProtocol { -- v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} -- v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) -- v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) -- return v --} -- --// Factory --type TJSONProtocolFactory struct{} -- --func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { -- return NewTJSONProtocol(trans) --} -- --func NewTJSONProtocolFactory() *TJSONProtocolFactory { -- return &TJSONProtocolFactory{} --} -- --func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { -- p.resetContextStack() // THRIFT-3735 -- if e := p.OutputListBegin(); e != nil { -- return e -- } -- if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil { -- return e -- } -- if e := p.WriteString(name); e != nil { -- return e -- } -- if e := p.WriteByte(int8(typeId)); e != nil { -- return e -- } -- if e := p.WriteI32(seqId); e != nil { -- return e -- } -- return nil --} -- --func (p *TJSONProtocol) WriteMessageEnd() error { -- return p.OutputListEnd() --} -- --func (p *TJSONProtocol) WriteStructBegin(name string) error { -- if e := p.OutputObjectBegin(); e != nil { -- return e -- } -- return nil --} -- --func (p *TJSONProtocol) WriteStructEnd() error { -- return p.OutputObjectEnd() --} -- --func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { -- if e := p.WriteI16(id); e != nil { -- return e -- } -- if e := p.OutputObjectBegin(); e != nil { -- return e -- } -- s, e1 := p.TypeIdToString(typeId) -- if e1 != nil { -- return e1 -- } -- if e := p.WriteString(s); e != nil { -- return e -- } -- return nil --} -- --func (p *TJSONProtocol) WriteFieldEnd() error { -- return p.OutputObjectEnd() --} -- --func (p *TJSONProtocol) WriteFieldStop() error { return nil } -- --func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { -- if e := p.OutputListBegin(); e != nil { -- return e -- } -- s, e1 := p.TypeIdToString(keyType) -- if e1 != nil { -- return e1 -- } -- if e := p.WriteString(s); e != nil { -- return e -- } -- s, e1 = p.TypeIdToString(valueType) -- if e1 != nil { -- return e1 -- } -- if e := p.WriteString(s); e != nil { -- return e -- } -- if e := p.WriteI64(int64(size)); e != nil { -- return e -- } -- return p.OutputObjectBegin() --} -- --func (p *TJSONProtocol) WriteMapEnd() error { -- if e := p.OutputObjectEnd(); e != nil { -- return e -- } -- return p.OutputListEnd() --} -- --func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error { -- return p.OutputElemListBegin(elemType, size) --} -- --func (p *TJSONProtocol) WriteListEnd() error { -- return p.OutputListEnd() --} -- --func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error { -- return p.OutputElemListBegin(elemType, size) --} -- --func (p *TJSONProtocol) WriteSetEnd() error { -- return p.OutputListEnd() --} -- --func (p *TJSONProtocol) WriteBool(b bool) error { -- if b { -- return p.WriteI32(1) -- } -- return p.WriteI32(0) --} -- --func (p *TJSONProtocol) WriteByte(b int8) error { -- return p.WriteI32(int32(b)) --} -- --func (p *TJSONProtocol) WriteI16(v int16) error { -- return p.WriteI32(int32(v)) --} -- --func (p *TJSONProtocol) WriteI32(v int32) error { -- return p.OutputI64(int64(v)) --} -- --func (p *TJSONProtocol) WriteI64(v int64) error { -- return p.OutputI64(int64(v)) --} -- --func (p *TJSONProtocol) 
WriteDouble(v float64) error { -- return p.OutputF64(v) --} -- --func (p *TJSONProtocol) WriteString(v string) error { -- return p.OutputString(v) --} -- --func (p *TJSONProtocol) WriteBinary(v []byte) error { -- // JSON library only takes in a string, -- // not an arbitrary byte array, to ensure bytes are transmitted -- // efficiently we must convert this into a valid JSON string -- // therefore we use base64 encoding to avoid excessive escaping/quoting -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- if _, e := p.write(JSON_QUOTE_BYTES); e != nil { -- return NewTProtocolException(e) -- } -- writer := base64.NewEncoder(base64.StdEncoding, p.writer) -- if _, e := writer.Write(v); e != nil { -- p.writer.Reset(p.trans) // THRIFT-3735 -- return NewTProtocolException(e) -- } -- if e := writer.Close(); e != nil { -- return NewTProtocolException(e) -- } -- if _, e := p.write(JSON_QUOTE_BYTES); e != nil { -- return NewTProtocolException(e) -- } -- return p.OutputPostValue() --} -- --// Reading methods. --func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { -- p.resetContextStack() // THRIFT-3735 -- if isNull, err := p.ParseListBegin(); isNull || err != nil { -- return name, typeId, seqId, err -- } -- version, err := p.ReadI32() -- if err != nil { -- return name, typeId, seqId, err -- } -- if version != THRIFT_JSON_PROTOCOL_VERSION { -- e := fmt.Errorf(""Unknown Protocol version %d, expected version %d"", version, THRIFT_JSON_PROTOCOL_VERSION) -- return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) -- -- } -- if name, err = p.ReadString(); err != nil { -- return name, typeId, seqId, err -- } -- bTypeId, err := p.ReadByte() -- typeId = TMessageType(bTypeId) -- if err != nil { -- return name, typeId, seqId, err -- } -- if seqId, err = p.ReadI32(); err != nil { -- return name, typeId, seqId, err -- } -- return name, typeId, seqId, nil --} -- --func (p *TJSONProtocol) ReadMessageEnd() error { -- err := p.ParseListEnd() -- return err --} -- --func (p *TJSONProtocol) ReadStructBegin() (name string, err error) { -- _, err = p.ParseObjectStart() -- return """", err --} -- --func (p *TJSONProtocol) ReadStructEnd() error { -- return p.ParseObjectEnd() --} -- --func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { -- b, _ := p.reader.Peek(1) -- if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { -- return """", STOP, -1, nil -- } -- fieldId, err := p.ReadI16() -- if err != nil { -- return """", STOP, fieldId, err -- } -- if _, err = p.ParseObjectStart(); err != nil { -- return """", STOP, fieldId, err -- } -- sType, err := p.ReadString() -- if err != nil { -- return """", STOP, fieldId, err -- } -- fType, err := p.StringToTypeId(sType) -- return """", fType, fieldId, err --} -- --func (p *TJSONProtocol) ReadFieldEnd() error { -- return p.ParseObjectEnd() --} -- --func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { -- if isNull, e := p.ParseListBegin(); isNull || e != nil { -- return VOID, VOID, 0, e -- } -- -- // read keyType -- sKeyType, e := p.ReadString() -- if e != nil { -- return keyType, valueType, size, e -- } -- keyType, e = p.StringToTypeId(sKeyType) -- if e != nil { -- return keyType, valueType, size, e -- } -- -- // read valueType -- sValueType, e := p.ReadString() -- if e != nil { -- return keyType, valueType, size, e -- } -- valueType, e = p.StringToTypeId(sValueType) -- if e != nil { -- return keyType, valueType, size, e -- } -- -- 
// read size -- iSize, e := p.ReadI64() -- if e != nil { -- return keyType, valueType, size, e -- } -- size = int(iSize) -- -- _, e = p.ParseObjectStart() -- return keyType, valueType, size, e --} -- --func (p *TJSONProtocol) ReadMapEnd() error { -- e := p.ParseObjectEnd() -- if e != nil { -- return e -- } -- return p.ParseListEnd() --} -- --func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { -- return p.ParseElemListBegin() --} -- --func (p *TJSONProtocol) ReadListEnd() error { -- return p.ParseListEnd() --} -- --func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { -- return p.ParseElemListBegin() --} -- --func (p *TJSONProtocol) ReadSetEnd() error { -- return p.ParseListEnd() --} -- --func (p *TJSONProtocol) ReadBool() (bool, error) { -- value, err := p.ReadI32() -- return (value != 0), err --} -- --func (p *TJSONProtocol) ReadByte() (int8, error) { -- v, err := p.ReadI64() -- return int8(v), err --} -- --func (p *TJSONProtocol) ReadI16() (int16, error) { -- v, err := p.ReadI64() -- return int16(v), err --} -- --func (p *TJSONProtocol) ReadI32() (int32, error) { -- v, err := p.ReadI64() -- return int32(v), err --} -- --func (p *TJSONProtocol) ReadI64() (int64, error) { -- v, _, err := p.ParseI64() -- return v, err --} -- --func (p *TJSONProtocol) ReadDouble() (float64, error) { -- v, _, err := p.ParseF64() -- return v, err --} -- --func (p *TJSONProtocol) ReadString() (string, error) { -- var v string -- if err := p.ParsePreValue(); err != nil { -- return v, err -- } -- f, _ := p.reader.Peek(1) -- if len(f) > 0 && f[0] == JSON_QUOTE { -- p.reader.ReadByte() -- value, err := p.ParseStringBody() -- v = value -- if err != nil { -- return v, err -- } -- } else if len(f) > 0 && f[0] == JSON_NULL[0] { -- b := make([]byte, len(JSON_NULL)) -- _, err := p.reader.Read(b) -- if err != nil { -- return v, NewTProtocolException(err) -- } -- if string(b) != string(JSON_NULL) { -- e := fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(b)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } else { -- e := fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(f)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return v, p.ParsePostValue() --} -- --func (p *TJSONProtocol) ReadBinary() ([]byte, error) { -- var v []byte -- if err := p.ParsePreValue(); err != nil { -- return nil, err -- } -- f, _ := p.reader.Peek(1) -- if len(f) > 0 && f[0] == JSON_QUOTE { -- p.reader.ReadByte() -- value, err := p.ParseBase64EncodedBody() -- v = value -- if err != nil { -- return v, err -- } -- } else if len(f) > 0 && f[0] == JSON_NULL[0] { -- b := make([]byte, len(JSON_NULL)) -- _, err := p.reader.Read(b) -- if err != nil { -- return v, NewTProtocolException(err) -- } -- if string(b) != string(JSON_NULL) { -- e := fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(b)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } else { -- e := fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(f)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- -- return v, p.ParsePostValue() --} -- --func (p *TJSONProtocol) Flush() (err error) { -- err = p.writer.Flush() -- if err == nil { -- err = p.trans.Flush() -- } -- return NewTProtocolException(err) --} -- --func (p *TJSONProtocol) Skip(fieldType TType) (err error) { -- return SkipDefaultDepth(p, fieldType) --} -- --func (p *TJSONProtocol) 
Transport() TTransport { -- return p.trans --} -- --func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { -- if e := p.OutputListBegin(); e != nil { -- return e -- } -- s, e1 := p.TypeIdToString(elemType) -- if e1 != nil { -- return e1 -- } -- if e := p.WriteString(s); e != nil { -- return e -- } -- if e := p.WriteI64(int64(size)); e != nil { -- return e -- } -- return nil --} -- --func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { -- if isNull, e := p.ParseListBegin(); isNull || e != nil { -- return VOID, 0, e -- } -- sElemType, err := p.ReadString() -- if err != nil { -- return VOID, size, err -- } -- elemType, err = p.StringToTypeId(sElemType) -- if err != nil { -- return elemType, size, err -- } -- nSize, err2 := p.ReadI64() -- size = int(nSize) -- return elemType, size, err2 --} -- --func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { -- if isNull, e := p.ParseListBegin(); isNull || e != nil { -- return VOID, 0, e -- } -- sElemType, err := p.ReadString() -- if err != nil { -- return VOID, size, err -- } -- elemType, err = p.StringToTypeId(sElemType) -- if err != nil { -- return elemType, size, err -- } -- nSize, err2 := p.ReadI64() -- size = int(nSize) -- return elemType, size, err2 --} -- --func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { -- if e := p.OutputListBegin(); e != nil { -- return e -- } -- s, e1 := p.TypeIdToString(elemType) -- if e1 != nil { -- return e1 -- } -- if e := p.OutputString(s); e != nil { -- return e -- } -- if e := p.OutputI64(int64(size)); e != nil { -- return e -- } -- return nil --} -- --func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { -- switch byte(fieldType) { -- case BOOL: -- return ""tf"", nil -- case BYTE: -- return ""i8"", nil -- case I16: -- return ""i16"", nil -- case I32: -- return ""i32"", nil -- case I64: -- return ""i64"", nil -- case DOUBLE: -- return ""dbl"", nil -- case STRING: -- return ""str"", nil -- case STRUCT: -- return ""rec"", nil -- case MAP: -- return ""map"", nil -- case SET: -- return ""set"", nil -- case LIST: -- return ""lst"", nil -- } -- -- e := fmt.Errorf(""Unknown fieldType: %d"", int(fieldType)) -- return """", NewTProtocolExceptionWithType(INVALID_DATA, e) --} -- --func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { -- switch fieldType { -- case ""tf"": -- return TType(BOOL), nil -- case ""i8"": -- return TType(BYTE), nil -- case ""i16"": -- return TType(I16), nil -- case ""i32"": -- return TType(I32), nil -- case ""i64"": -- return TType(I64), nil -- case ""dbl"": -- return TType(DOUBLE), nil -- case ""str"": -- return TType(STRING), nil -- case ""rec"": -- return TType(STRUCT), nil -- case ""map"": -- return TType(MAP), nil -- case ""set"": -- return TType(SET), nil -- case ""lst"": -- return TType(LIST), nil -- } -- -- e := fmt.Errorf(""Unknown type identifier: %s"", fieldType) -- return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go -deleted file mode 100644 -index 97a4edfa5dba4..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go -+++ /dev/null -@@ -1,79 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. 
See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""bytes"" --) -- --// Memory buffer-based implementation of the TTransport interface. --type TMemoryBuffer struct { -- *bytes.Buffer -- size int --} -- --type TMemoryBufferTransportFactory struct { -- size int --} -- --func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { -- if trans != nil { -- t, ok := trans.(*TMemoryBuffer) -- if ok && t.size > 0 { -- return NewTMemoryBufferLen(t.size), nil -- } -- } -- return NewTMemoryBufferLen(p.size), nil --} -- --func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { -- return &TMemoryBufferTransportFactory{size: size} --} -- --func NewTMemoryBuffer() *TMemoryBuffer { -- return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} --} -- --func NewTMemoryBufferLen(size int) *TMemoryBuffer { -- buf := make([]byte, 0, size) -- return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} --} -- --func (p *TMemoryBuffer) IsOpen() bool { -- return true --} -- --func (p *TMemoryBuffer) Open() error { -- return nil --} -- --func (p *TMemoryBuffer) Close() error { -- p.Buffer.Reset() -- return nil --} -- --// Flushing a memory buffer is a no-op --func (p *TMemoryBuffer) Flush() error { -- return nil --} -- --func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { -- return uint64(p.Buffer.Len()) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go -deleted file mode 100644 -index 25ab2e98a256e..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go -+++ /dev/null -@@ -1,31 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --// Message type constants in the Thrift protocol. 
--type TMessageType int32 -- --const ( -- INVALID_TMESSAGE_TYPE TMessageType = 0 -- CALL TMessageType = 1 -- REPLY TMessageType = 2 -- EXCEPTION TMessageType = 3 -- ONEWAY TMessageType = 4 --) -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go -deleted file mode 100644 -index b7f4f8a1ccc88..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go -+++ /dev/null -@@ -1,139 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --/* --TMultiplexedProtocol is a protocol-independent concrete decorator --that allows a Thrift client to communicate with a multiplexing Thrift server, --by prepending the service name to the function name during function calls. -- --NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request --from a multiplexing client. -- --This example uses a single socket transport to invoke two services: -- --socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) --transport := thrift.NewTFramedTransport(socket) --protocol := thrift.NewTBinaryProtocolTransport(transport) -- --mp := thrift.NewTMultiplexedProtocol(protocol, ""Calculator"") --service := Calculator.NewCalculatorClient(mp) -- --mp2 := thrift.NewTMultiplexedProtocol(protocol, ""WeatherReport"") --service2 := WeatherReport.NewWeatherReportClient(mp2) -- --err := transport.Open() --if err != nil { -- t.Fatal(""Unable to open client socket"", err) --} -- --fmt.Println(service.Add(2,2)) --fmt.Println(service2.GetTemperature()) --*/ -- --type TMultiplexedProtocol struct { -- TProtocol -- serviceName string --} -- --const MULTIPLEXED_SEPARATOR = "":"" -- --func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { -- return &TMultiplexedProtocol{ -- TProtocol: protocol, -- serviceName: serviceName, -- } --} -- --func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { -- if typeId == CALL || typeId == ONEWAY { -- return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) -- } else { -- return t.TProtocol.WriteMessageBegin(name, typeId, seqid) -- } --} -- --/* --TMultiplexedProcessor is a TProcessor allowing --a single TServer to provide multiple services. 
-- --To do so, you instantiate the processor and then register additional --processors with it, as shown in the following example: -- --var processor = thrift.NewTMultiplexedProcessor() -- --firstProcessor := --processor.RegisterProcessor(""FirstService"", firstProcessor) -- --processor.registerProcessor( -- ""Calculator"", -- Calculator.NewCalculatorProcessor(&CalculatorHandler{}), --) -- --processor.registerProcessor( -- ""WeatherReport"", -- WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), --) -- --serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) --if err != nil { -- t.Fatal(""Unable to create server socket"", err) --} --server := thrift.NewTSimpleServer2(processor, serverTransport) --server.Serve(); --*/ -- --type TMultiplexedProcessor struct { -- serviceProcessorMap map[string]TProcessor -- DefaultProcessor TProcessor --} -- --func NewTMultiplexedProcessor() *TMultiplexedProcessor { -- return &TMultiplexedProcessor{ -- serviceProcessorMap: make(map[string]TProcessor), -- } --} -- --func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { -- t.DefaultProcessor = processor --} -- --func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { -- if t.serviceProcessorMap == nil { -- t.serviceProcessorMap = make(map[string]TProcessor) -- } -- t.serviceProcessorMap[name] = processor --} -- --//Protocol that use stored message for ReadMessageBegin --type storedMessageProtocol struct { -- TProtocol -- name string -- typeId TMessageType -- seqid int32 --} -- --func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { -- return &storedMessageProtocol{protocol, name, typeId, seqid} --} -- --func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { -- return s.name, s.typeId, s.seqid, nil --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go -deleted file mode 100644 -index c71035e680c54..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go -+++ /dev/null -@@ -1,53 +0,0 @@ --// +build go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. 
-- */ -- --package thrift -- --import ( -- ""context"" -- ""fmt"" -- ""strings"" --) -- --func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { -- name, typeId, seqid, err := in.ReadMessageBegin() -- if err != nil { -- return false, err -- } -- if typeId != CALL && typeId != ONEWAY { -- return false, fmt.Errorf(""Unexpected message type %v"", typeId) -- } -- //extract the service name -- v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) -- if len(v) != 2 { -- if t.DefaultProcessor != nil { -- smb := NewStoredMessageProtocol(in, name, typeId, seqid) -- return t.DefaultProcessor.Process(ctx, smb, out) -- } -- return false, fmt.Errorf(""Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?"", name) -- } -- actualProcessor, ok := t.serviceProcessorMap[v[0]] -- if !ok { -- return false, fmt.Errorf(""Service name not found: %s. Did you forget to call registerProcessor()?"", v[0]) -- } -- smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) -- return actualProcessor.Process(ctx, smb, out) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go -deleted file mode 100644 -index 5c27b38755c00..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go -+++ /dev/null -@@ -1,54 +0,0 @@ --// +build !go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""fmt"" -- ""strings"" -- -- ""golang.org/x/net/context"" --) -- --func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { -- name, typeId, seqid, err := in.ReadMessageBegin() -- if err != nil { -- return false, err -- } -- if typeId != CALL && typeId != ONEWAY { -- return false, fmt.Errorf(""Unexpected message type %v"", typeId) -- } -- //extract the service name -- v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) -- if len(v) != 2 { -- if t.DefaultProcessor != nil { -- smb := NewStoredMessageProtocol(in, name, typeId, seqid) -- return t.DefaultProcessor.Process(ctx, smb, out) -- } -- return false, fmt.Errorf(""Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?"", name) -- } -- actualProcessor, ok := t.serviceProcessorMap[v[0]] -- if !ok { -- return false, fmt.Errorf(""Service name not found: %s. 
Did you forget to call registerProcessor()?"", v[0]) -- } -- smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) -- return actualProcessor.Process(ctx, smb, out) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go -deleted file mode 100644 -index aa8daa9b54f92..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go -+++ /dev/null -@@ -1,164 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""math"" -- ""strconv"" --) -- --type Numeric interface { -- Int64() int64 -- Int32() int32 -- Int16() int16 -- Byte() byte -- Int() int -- Float64() float64 -- Float32() float32 -- String() string -- isNull() bool --} -- --type numeric struct { -- iValue int64 -- dValue float64 -- sValue string -- isNil bool --} -- --var ( -- INFINITY Numeric -- NEGATIVE_INFINITY Numeric -- NAN Numeric -- ZERO Numeric -- NUMERIC_NULL Numeric --) -- --func NewNumericFromDouble(dValue float64) Numeric { -- if math.IsInf(dValue, 1) { -- return INFINITY -- } -- if math.IsInf(dValue, -1) { -- return NEGATIVE_INFINITY -- } -- if math.IsNaN(dValue) { -- return NAN -- } -- iValue := int64(dValue) -- sValue := strconv.FormatFloat(dValue, 'g', 10, 64) -- isNil := false -- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} --} -- --func NewNumericFromI64(iValue int64) Numeric { -- dValue := float64(iValue) -- sValue := string(iValue) -- isNil := false -- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} --} -- --func NewNumericFromI32(iValue int32) Numeric { -- dValue := float64(iValue) -- sValue := string(iValue) -- isNil := false -- return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} --} -- --func NewNumericFromString(sValue string) Numeric { -- if sValue == INFINITY.String() { -- return INFINITY -- } -- if sValue == NEGATIVE_INFINITY.String() { -- return NEGATIVE_INFINITY -- } -- if sValue == NAN.String() { -- return NAN -- } -- iValue, _ := strconv.ParseInt(sValue, 10, 64) -- dValue, _ := strconv.ParseFloat(sValue, 64) -- isNil := len(sValue) == 0 -- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} --} -- --func NewNumericFromJSONString(sValue string, isNull bool) Numeric { -- if isNull { -- return NewNullNumeric() -- } -- if sValue == JSON_INFINITY { -- return INFINITY -- } -- if sValue == JSON_NEGATIVE_INFINITY { -- return NEGATIVE_INFINITY -- } -- if sValue == JSON_NAN { -- return NAN -- } -- iValue, _ := strconv.ParseInt(sValue, 10, 64) -- dValue, _ := strconv.ParseFloat(sValue, 64) -- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} --} -- --func 
NewNullNumeric() Numeric { -- return &numeric{iValue: 0, dValue: 0.0, sValue: """", isNil: true} --} -- --func (p *numeric) Int64() int64 { -- return p.iValue --} -- --func (p *numeric) Int32() int32 { -- return int32(p.iValue) --} -- --func (p *numeric) Int16() int16 { -- return int16(p.iValue) --} -- --func (p *numeric) Byte() byte { -- return byte(p.iValue) --} -- --func (p *numeric) Int() int { -- return int(p.iValue) --} -- --func (p *numeric) Float64() float64 { -- return p.dValue --} -- --func (p *numeric) Float32() float32 { -- return float32(p.dValue) --} -- --func (p *numeric) String() string { -- return p.sValue --} -- --func (p *numeric) isNull() bool { -- return p.isNil --} -- --func init() { -- INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: ""Infinity"", isNil: false} -- NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: ""-Infinity"", isNil: false} -- NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: ""NaN"", isNil: false} -- ZERO = &numeric{iValue: 0, dValue: 0, sValue: ""0"", isNil: false} -- NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: ""0"", isNil: true} --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go -deleted file mode 100644 -index 8d6b2c2159dcc..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go -+++ /dev/null -@@ -1,50 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --/////////////////////////////////////////////////////////////////////////////// --// This file is home to helpers that convert from various base types to --// respective pointer types. This is necessary because Go does not permit --// references to constants, nor can a pointer type to base type be allocated --// and initialized in a single expression. --// --// E.g., this is not allowed: --// --// var ip *int = &5 --// --// But this *is* allowed: --// --// func IntPtr(i int) *int { return &i } --// var ip *int = IntPtr(5) --// --// Since pointers to base types are commonplace as [optional] fields in --// exported thrift structs, we factor such helpers here. 
--/////////////////////////////////////////////////////////////////////////////// -- --func Float32Ptr(v float32) *float32 { return &v } --func Float64Ptr(v float64) *float64 { return &v } --func IntPtr(v int) *int { return &v } --func Int32Ptr(v int32) *int32 { return &v } --func Int64Ptr(v int64) *int64 { return &v } --func StringPtr(v string) *string { return &v } --func Uint32Ptr(v uint32) *uint32 { return &v } --func Uint64Ptr(v uint64) *uint64 { return &v } --func BoolPtr(v bool) *bool { return &v } --func ByteSlicePtr(v []byte) *[]byte { return &v } -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go -deleted file mode 100644 -index cb564b8dbd6a3..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go -+++ /dev/null -@@ -1,26 +0,0 @@ --// +build !go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ""golang.org/x/net/context"" -- --var defaultCtx = context.Background() -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor.go -deleted file mode 100644 -index 566aaaf718292..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/processor.go -+++ /dev/null -@@ -1,34 +0,0 @@ --// +build !go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ""golang.org/x/net/context"" -- --// A processor is a generic object which operates upon an input stream and --// writes to some output stream. 
--type TProcessor interface { -- Process(ctx context.Context, in, out TProtocol) (bool, TException) --} -- --type TProcessorFunction interface { -- Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go -deleted file mode 100644 -index 9d645df244623..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go -+++ /dev/null -@@ -1,58 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --// The default processor factory just returns a singleton --// instance. --type TProcessorFactory interface { -- GetProcessor(trans TTransport) TProcessor --} -- --type tProcessorFactory struct { -- processor TProcessor --} -- --func NewTProcessorFactory(p TProcessor) TProcessorFactory { -- return &tProcessorFactory{processor: p} --} -- --func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { -- return p.processor --} -- --/** -- * The default processor factory just returns a singleton -- * instance. -- */ --type TProcessorFunctionFactory interface { -- GetProcessorFunction(trans TTransport) TProcessorFunction --} -- --type tProcessorFunctionFactory struct { -- processor TProcessorFunction --} -- --func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { -- return &tProcessorFunctionFactory{processor: p} --} -- --func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { -- return p.processor --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go -deleted file mode 100644 -index fb0b165dcc71e..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go -+++ /dev/null -@@ -1,34 +0,0 @@ --// +build go1.7 -- --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. 
See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ""context"" -- --// A processor is a generic object which operates upon an input stream and --// writes to some output stream. --type TProcessor interface { -- Process(ctx context.Context, in, out TProtocol) (bool, TException) --} -- --type TProcessorFunction interface { -- Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go -deleted file mode 100644 -index 25e6d24b90451..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go -+++ /dev/null -@@ -1,178 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""errors"" -- ""fmt"" --) -- --const ( -- VERSION_MASK = 0xffff0000 -- VERSION_1 = 0x80010000 --) -- --type TProtocol interface { -- WriteMessageBegin(name string, typeId TMessageType, seqid int32) error -- WriteMessageEnd() error -- WriteStructBegin(name string) error -- WriteStructEnd() error -- WriteFieldBegin(name string, typeId TType, id int16) error -- WriteFieldEnd() error -- WriteFieldStop() error -- WriteMapBegin(keyType TType, valueType TType, size int) error -- WriteMapEnd() error -- WriteListBegin(elemType TType, size int) error -- WriteListEnd() error -- WriteSetBegin(elemType TType, size int) error -- WriteSetEnd() error -- WriteBool(value bool) error -- WriteByte(value int8) error -- WriteI16(value int16) error -- WriteI32(value int32) error -- WriteI64(value int64) error -- WriteDouble(value float64) error -- WriteString(value string) error -- WriteBinary(value []byte) error -- -- ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) -- ReadMessageEnd() error -- ReadStructBegin() (name string, err error) -- ReadStructEnd() error -- ReadFieldBegin() (name string, typeId TType, id int16, err error) -- ReadFieldEnd() error -- ReadMapBegin() (keyType TType, valueType TType, size int, err error) -- ReadMapEnd() error -- ReadListBegin() (elemType TType, size int, err error) -- ReadListEnd() error -- ReadSetBegin() (elemType TType, size int, err error) -- ReadSetEnd() error -- ReadBool() (value bool, err error) -- ReadByte() (value int8, err error) -- ReadI16() (value int16, err error) -- ReadI32() (value int32, err error) -- ReadI64() (value int64, err error) -- ReadDouble() (value float64, err error) -- ReadString() (value string, err error) -- ReadBinary() (value []byte, err error) -- -- Skip(fieldType TType) (err error) -- Flush() (err error) -- -- Transport() TTransport --} -- --// The maximum recursive 
depth the skip() function will traverse --const DEFAULT_RECURSION_DEPTH = 64 -- --// Skips over the next data element from the provided input TProtocol object. --func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) { -- return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) --} -- --// Skips over the next data element from the provided input TProtocol object. --func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) { -- -- if maxDepth <= 0 { -- return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New(""Depth limit exceeded"")) -- } -- -- switch fieldType { -- case STOP: -- return -- case BOOL: -- _, err = self.ReadBool() -- return -- case BYTE: -- _, err = self.ReadByte() -- return -- case I16: -- _, err = self.ReadI16() -- return -- case I32: -- _, err = self.ReadI32() -- return -- case I64: -- _, err = self.ReadI64() -- return -- case DOUBLE: -- _, err = self.ReadDouble() -- return -- case STRING: -- _, err = self.ReadString() -- return -- case STRUCT: -- if _, err = self.ReadStructBegin(); err != nil { -- return err -- } -- for { -- _, typeId, _, _ := self.ReadFieldBegin() -- if typeId == STOP { -- break -- } -- err := Skip(self, typeId, maxDepth-1) -- if err != nil { -- return err -- } -- self.ReadFieldEnd() -- } -- return self.ReadStructEnd() -- case MAP: -- keyType, valueType, size, err := self.ReadMapBegin() -- if err != nil { -- return err -- } -- for i := 0; i < size; i++ { -- err := Skip(self, keyType, maxDepth-1) -- if err != nil { -- return err -- } -- self.Skip(valueType) -- } -- return self.ReadMapEnd() -- case SET: -- elemType, size, err := self.ReadSetBegin() -- if err != nil { -- return err -- } -- for i := 0; i < size; i++ { -- err := Skip(self, elemType, maxDepth-1) -- if err != nil { -- return err -- } -- } -- return self.ReadSetEnd() -- case LIST: -- elemType, size, err := self.ReadListBegin() -- if err != nil { -- return err -- } -- for i := 0; i < size; i++ { -- err := Skip(self, elemType, maxDepth-1) -- if err != nil { -- return err -- } -- } -- return self.ReadListEnd() -- default: -- return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf(""Unknown data type %d"", fieldType))) -- } -- return nil --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go -deleted file mode 100644 -index 29ab75d92159f..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go -+++ /dev/null -@@ -1,77 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. 
-- */ -- --package thrift -- --import ( -- ""encoding/base64"" --) -- --// Thrift Protocol exception --type TProtocolException interface { -- TException -- TypeId() int --} -- --const ( -- UNKNOWN_PROTOCOL_EXCEPTION = 0 -- INVALID_DATA = 1 -- NEGATIVE_SIZE = 2 -- SIZE_LIMIT = 3 -- BAD_VERSION = 4 -- NOT_IMPLEMENTED = 5 -- DEPTH_LIMIT = 6 --) -- --type tProtocolException struct { -- typeId int -- message string --} -- --func (p *tProtocolException) TypeId() int { -- return p.typeId --} -- --func (p *tProtocolException) String() string { -- return p.message --} -- --func (p *tProtocolException) Error() string { -- return p.message --} -- --func NewTProtocolException(err error) TProtocolException { -- if err == nil { -- return nil -- } -- if e, ok := err.(TProtocolException); ok { -- return e -- } -- if _, ok := err.(base64.CorruptInputError); ok { -- return &tProtocolException{INVALID_DATA, err.Error()} -- } -- return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} --} -- --func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { -- if err == nil { -- return nil -- } -- return &tProtocolException{errType, err.Error()} --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go -deleted file mode 100644 -index c40f796d886ac..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go -+++ /dev/null -@@ -1,25 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --// Factory interface for constructing protocol instances. --type TProtocolFactory interface { -- GetProtocol(trans TTransport) TProtocol --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go -deleted file mode 100644 -index 4025bebeaa416..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go -+++ /dev/null -@@ -1,68 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. 
See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ""io"" -- --type RichTransport struct { -- TTransport --} -- --// Wraps Transport to provide TRichTransport interface --func NewTRichTransport(trans TTransport) *RichTransport { -- return &RichTransport{trans} --} -- --func (r *RichTransport) ReadByte() (c byte, err error) { -- return readByte(r.TTransport) --} -- --func (r *RichTransport) WriteByte(c byte) error { -- return writeByte(r.TTransport, c) --} -- --func (r *RichTransport) WriteString(s string) (n int, err error) { -- return r.Write([]byte(s)) --} -- --func (r *RichTransport) RemainingBytes() (num_bytes uint64) { -- return r.TTransport.RemainingBytes() --} -- --func readByte(r io.Reader) (c byte, err error) { -- v := [1]byte{0} -- n, err := r.Read(v[0:1]) -- if n > 0 && (err == nil || err == io.EOF) { -- return v[0], nil -- } -- if n > 0 && err != nil { -- return v[0], err -- } -- if err != nil { -- return 0, err -- } -- return v[0], nil --} -- --func writeByte(w io.Writer, c byte) error { -- v := [1]byte{c} -- _, err := w.Write(v[0:1]) -- return err --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go -deleted file mode 100644 -index 7712229990916..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go -+++ /dev/null -@@ -1,75 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --type TSerializer struct { -- Transport *TMemoryBuffer -- Protocol TProtocol --} -- --type TStruct interface { -- Write(p TProtocol) error -- Read(p TProtocol) error --} -- --func NewTSerializer() *TSerializer { -- transport := NewTMemoryBufferLen(1024) -- protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) -- -- return &TSerializer{ -- transport, -- protocol} --} -- --func (t *TSerializer) WriteString(msg TStruct) (s string, err error) { -- t.Transport.Reset() -- -- if err = msg.Write(t.Protocol); err != nil { -- return -- } -- -- if err = t.Protocol.Flush(); err != nil { -- return -- } -- if err = t.Transport.Flush(); err != nil { -- return -- } -- -- return t.Transport.String(), nil --} -- --func (t *TSerializer) Write(msg TStruct) (b []byte, err error) { -- t.Transport.Reset() -- -- if err = msg.Write(t.Protocol); err != nil { -- return -- } -- -- if err = t.Protocol.Flush(); err != nil { -- return -- } -- -- if err = t.Transport.Flush(); err != nil { -- return -- } -- -- b = append(b, t.Transport.Bytes()...) 
-- return --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server.go b/vendor/github.com/apache/thrift/lib/go/thrift/server.go -deleted file mode 100644 -index f813fa3532c35..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/server.go -+++ /dev/null -@@ -1,35 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --type TServer interface { -- ProcessorFactory() TProcessorFactory -- ServerTransport() TServerTransport -- InputTransportFactory() TTransportFactory -- OutputTransportFactory() TTransportFactory -- InputProtocolFactory() TProtocolFactory -- OutputProtocolFactory() TProtocolFactory -- -- // Starts the server -- Serve() error -- // Stops the server. This is optional on a per-implementation basis. Not -- // all servers are required to be cleanly stoppable. -- Stop() error --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go -deleted file mode 100644 -index 80313c4be52e4..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go -+++ /dev/null -@@ -1,134 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""net"" -- ""sync"" -- ""time"" --) -- --type TServerSocket struct { -- listener net.Listener -- addr net.Addr -- clientTimeout time.Duration -- -- // Protects the interrupted value to make it thread safe. 
-- mu sync.RWMutex -- interrupted bool --} -- --func NewTServerSocket(listenAddr string) (*TServerSocket, error) { -- return NewTServerSocketTimeout(listenAddr, 0) --} -- --func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { -- addr, err := net.ResolveTCPAddr(""tcp"", listenAddr) -- if err != nil { -- return nil, err -- } -- return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil --} -- --// Creates a TServerSocket from a net.Addr --func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { -- return &TServerSocket{addr: addr, clientTimeout: clientTimeout} --} -- --func (p *TServerSocket) Listen() error { -- p.mu.Lock() -- defer p.mu.Unlock() -- if p.IsListening() { -- return nil -- } -- l, err := net.Listen(p.addr.Network(), p.addr.String()) -- if err != nil { -- return err -- } -- p.listener = l -- return nil --} -- --func (p *TServerSocket) Accept() (TTransport, error) { -- p.mu.RLock() -- interrupted := p.interrupted -- p.mu.RUnlock() -- -- if interrupted { -- return nil, errTransportInterrupted -- } -- -- listener := p.listener -- if listener == nil { -- return nil, NewTTransportException(NOT_OPEN, ""No underlying server socket"") -- } -- -- conn, err := listener.Accept() -- if err != nil { -- return nil, NewTTransportExceptionFromError(err) -- } -- return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil --} -- --// Checks whether the socket is listening. --func (p *TServerSocket) IsListening() bool { -- return p.listener != nil --} -- --// Connects the socket, creating a new socket object if necessary. --func (p *TServerSocket) Open() error { -- p.mu.Lock() -- defer p.mu.Unlock() -- if p.IsListening() { -- return NewTTransportException(ALREADY_OPEN, ""Server socket already open"") -- } -- if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { -- return err -- } else { -- p.listener = l -- } -- return nil --} -- --func (p *TServerSocket) Addr() net.Addr { -- if p.listener != nil { -- return p.listener.Addr() -- } -- return p.addr --} -- --func (p *TServerSocket) Close() error { -- defer func() { -- p.listener = nil -- }() -- if p.IsListening() { -- return p.listener.Close() -- } -- return nil --} -- --func (p *TServerSocket) Interrupt() error { -- p.mu.Lock() -- defer p.mu.Unlock() -- p.interrupted = true -- p.Close() -- -- return nil --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go -deleted file mode 100644 -index 51c40b64a1d9c..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go -+++ /dev/null -@@ -1,34 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. 
See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --// Server transport. Object which provides client transports. --type TServerTransport interface { -- Listen() error -- Accept() (TTransport, error) -- Close() error -- -- // Optional method implementation. This signals to the server transport -- // that it should break out of any accept() or listen() that it is currently -- // blocked on. This method, if implemented, MUST be thread safe, as it may -- // be called from a different thread context than the other TServerTransport -- // methods. -- Interrupt() error --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go -deleted file mode 100644 -index 73533223182cf..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go -+++ /dev/null -@@ -1,1337 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""bufio"" -- ""bytes"" -- ""encoding/base64"" -- ""encoding/json"" -- ""fmt"" -- ""io"" -- ""math"" -- ""strconv"" --) -- --type _ParseContext int -- --const ( -- _CONTEXT_IN_TOPLEVEL _ParseContext = 1 -- _CONTEXT_IN_LIST_FIRST _ParseContext = 2 -- _CONTEXT_IN_LIST _ParseContext = 3 -- _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 -- _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 -- _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 --) -- --func (p _ParseContext) String() string { -- switch p { -- case _CONTEXT_IN_TOPLEVEL: -- return ""TOPLEVEL"" -- case _CONTEXT_IN_LIST_FIRST: -- return ""LIST-FIRST"" -- case _CONTEXT_IN_LIST: -- return ""LIST"" -- case _CONTEXT_IN_OBJECT_FIRST: -- return ""OBJECT-FIRST"" -- case _CONTEXT_IN_OBJECT_NEXT_KEY: -- return ""OBJECT-NEXT-KEY"" -- case _CONTEXT_IN_OBJECT_NEXT_VALUE: -- return ""OBJECT-NEXT-VALUE"" -- } -- return ""UNKNOWN-PARSE-CONTEXT"" --} -- --// JSON protocol implementation for thrift. --// --// This protocol produces/consumes a simple output format --// suitable for parsing by scripting languages. It should not be --// confused with the full-featured TJSONProtocol. 
--// --type TSimpleJSONProtocol struct { -- trans TTransport -- -- parseContextStack []int -- dumpContext []int -- -- writer *bufio.Writer -- reader *bufio.Reader --} -- --// Constructor --func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { -- v := &TSimpleJSONProtocol{trans: t, -- writer: bufio.NewWriter(t), -- reader: bufio.NewReader(t), -- } -- v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) -- v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) -- return v --} -- --// Factory --type TSimpleJSONProtocolFactory struct{} -- --func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { -- return NewTSimpleJSONProtocol(trans) --} -- --func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { -- return &TSimpleJSONProtocolFactory{} --} -- --var ( -- JSON_COMMA []byte -- JSON_COLON []byte -- JSON_LBRACE []byte -- JSON_RBRACE []byte -- JSON_LBRACKET []byte -- JSON_RBRACKET []byte -- JSON_QUOTE byte -- JSON_QUOTE_BYTES []byte -- JSON_NULL []byte -- JSON_TRUE []byte -- JSON_FALSE []byte -- JSON_INFINITY string -- JSON_NEGATIVE_INFINITY string -- JSON_NAN string -- JSON_INFINITY_BYTES []byte -- JSON_NEGATIVE_INFINITY_BYTES []byte -- JSON_NAN_BYTES []byte -- json_nonbase_map_elem_bytes []byte --) -- --func init() { -- JSON_COMMA = []byte{','} -- JSON_COLON = []byte{':'} -- JSON_LBRACE = []byte{'{'} -- JSON_RBRACE = []byte{'}'} -- JSON_LBRACKET = []byte{'['} -- JSON_RBRACKET = []byte{']'} -- JSON_QUOTE = '""' -- JSON_QUOTE_BYTES = []byte{'""'} -- JSON_NULL = []byte{'n', 'u', 'l', 'l'} -- JSON_TRUE = []byte{'t', 'r', 'u', 'e'} -- JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} -- JSON_INFINITY = ""Infinity"" -- JSON_NEGATIVE_INFINITY = ""-Infinity"" -- JSON_NAN = ""NaN"" -- JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} -- JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} -- JSON_NAN_BYTES = []byte{'N', 'a', 'N'} -- json_nonbase_map_elem_bytes = []byte{']', ',', '['} --} -- --func jsonQuote(s string) string { -- b, _ := json.Marshal(s) -- s1 := string(b) -- return s1 --} -- --func jsonUnquote(s string) (string, bool) { -- s1 := new(string) -- err := json.Unmarshal([]byte(s), s1) -- return *s1, err == nil --} -- --func mismatch(expected, actual string) error { -- return fmt.Errorf(""Expected '%s' but found '%s' while parsing JSON."", expected, actual) --} -- --func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { -- p.resetContextStack() // THRIFT-3735 -- if e := p.OutputListBegin(); e != nil { -- return e -- } -- if e := p.WriteString(name); e != nil { -- return e -- } -- if e := p.WriteByte(int8(typeId)); e != nil { -- return e -- } -- if e := p.WriteI32(seqId); e != nil { -- return e -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) WriteMessageEnd() error { -- return p.OutputListEnd() --} -- --func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error { -- if e := p.OutputObjectBegin(); e != nil { -- return e -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) WriteStructEnd() error { -- return p.OutputObjectEnd() --} -- --func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { -- if e := p.WriteString(name); e != nil { -- return e -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) WriteFieldEnd() error { -- //return p.OutputListEnd() -- return nil --} -- --func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil } -- --func (p 
*TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { -- if e := p.OutputListBegin(); e != nil { -- return e -- } -- if e := p.WriteByte(int8(keyType)); e != nil { -- return e -- } -- if e := p.WriteByte(int8(valueType)); e != nil { -- return e -- } -- return p.WriteI32(int32(size)) --} -- --func (p *TSimpleJSONProtocol) WriteMapEnd() error { -- return p.OutputListEnd() --} -- --func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error { -- return p.OutputElemListBegin(elemType, size) --} -- --func (p *TSimpleJSONProtocol) WriteListEnd() error { -- return p.OutputListEnd() --} -- --func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error { -- return p.OutputElemListBegin(elemType, size) --} -- --func (p *TSimpleJSONProtocol) WriteSetEnd() error { -- return p.OutputListEnd() --} -- --func (p *TSimpleJSONProtocol) WriteBool(b bool) error { -- return p.OutputBool(b) --} -- --func (p *TSimpleJSONProtocol) WriteByte(b int8) error { -- return p.WriteI32(int32(b)) --} -- --func (p *TSimpleJSONProtocol) WriteI16(v int16) error { -- return p.WriteI32(int32(v)) --} -- --func (p *TSimpleJSONProtocol) WriteI32(v int32) error { -- return p.OutputI64(int64(v)) --} -- --func (p *TSimpleJSONProtocol) WriteI64(v int64) error { -- return p.OutputI64(int64(v)) --} -- --func (p *TSimpleJSONProtocol) WriteDouble(v float64) error { -- return p.OutputF64(v) --} -- --func (p *TSimpleJSONProtocol) WriteString(v string) error { -- return p.OutputString(v) --} -- --func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { -- // JSON library only takes in a string, -- // not an arbitrary byte array, to ensure bytes are transmitted -- // efficiently we must convert this into a valid JSON string -- // therefore we use base64 encoding to avoid excessive escaping/quoting -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- if _, e := p.write(JSON_QUOTE_BYTES); e != nil { -- return NewTProtocolException(e) -- } -- writer := base64.NewEncoder(base64.StdEncoding, p.writer) -- if _, e := writer.Write(v); e != nil { -- p.writer.Reset(p.trans) // THRIFT-3735 -- return NewTProtocolException(e) -- } -- if e := writer.Close(); e != nil { -- return NewTProtocolException(e) -- } -- if _, e := p.write(JSON_QUOTE_BYTES); e != nil { -- return NewTProtocolException(e) -- } -- return p.OutputPostValue() --} -- --// Reading methods. 
--func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { -- p.resetContextStack() // THRIFT-3735 -- if isNull, err := p.ParseListBegin(); isNull || err != nil { -- return name, typeId, seqId, err -- } -- if name, err = p.ReadString(); err != nil { -- return name, typeId, seqId, err -- } -- bTypeId, err := p.ReadByte() -- typeId = TMessageType(bTypeId) -- if err != nil { -- return name, typeId, seqId, err -- } -- if seqId, err = p.ReadI32(); err != nil { -- return name, typeId, seqId, err -- } -- return name, typeId, seqId, nil --} -- --func (p *TSimpleJSONProtocol) ReadMessageEnd() error { -- return p.ParseListEnd() --} -- --func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { -- _, err = p.ParseObjectStart() -- return """", err --} -- --func (p *TSimpleJSONProtocol) ReadStructEnd() error { -- return p.ParseObjectEnd() --} -- --func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { -- if err := p.ParsePreValue(); err != nil { -- return """", STOP, 0, err -- } -- b, _ := p.reader.Peek(1) -- if len(b) > 0 { -- switch b[0] { -- case JSON_RBRACE[0]: -- return """", STOP, 0, nil -- case JSON_QUOTE: -- p.reader.ReadByte() -- name, err := p.ParseStringBody() -- // simplejson is not meant to be read back into thrift -- // - see http://wiki.apache.org/thrift/ThriftUsageJava -- // - use JSON instead -- if err != nil { -- return name, STOP, 0, err -- } -- return name, STOP, -1, p.ParsePostValue() -- /* -- if err = p.ParsePostValue(); err != nil { -- return name, STOP, 0, err -- } -- if isNull, err := p.ParseListBegin(); isNull || err != nil { -- return name, STOP, 0, err -- } -- bType, err := p.ReadByte() -- thetype := TType(bType) -- if err != nil { -- return name, thetype, 0, err -- } -- id, err := p.ReadI16() -- return name, thetype, id, err -- */ -- } -- e := fmt.Errorf(""Expected \""}\"" or '\""', but found: '%s'"", string(b)) -- return """", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return """", STOP, 0, NewTProtocolException(io.EOF) --} -- --func (p *TSimpleJSONProtocol) ReadFieldEnd() error { -- return nil -- //return p.ParseListEnd() --} -- --func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { -- if isNull, e := p.ParseListBegin(); isNull || e != nil { -- return VOID, VOID, 0, e -- } -- -- // read keyType -- bKeyType, e := p.ReadByte() -- keyType = TType(bKeyType) -- if e != nil { -- return keyType, valueType, size, e -- } -- -- // read valueType -- bValueType, e := p.ReadByte() -- valueType = TType(bValueType) -- if e != nil { -- return keyType, valueType, size, e -- } -- -- // read size -- iSize, err := p.ReadI64() -- size = int(iSize) -- return keyType, valueType, size, err --} -- --func (p *TSimpleJSONProtocol) ReadMapEnd() error { -- return p.ParseListEnd() --} -- --func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { -- return p.ParseElemListBegin() --} -- --func (p *TSimpleJSONProtocol) ReadListEnd() error { -- return p.ParseListEnd() --} -- --func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { -- return p.ParseElemListBegin() --} -- --func (p *TSimpleJSONProtocol) ReadSetEnd() error { -- return p.ParseListEnd() --} -- --func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { -- var value bool -- -- if err := p.ParsePreValue(); err != nil { -- return value, err -- } -- f, _ := p.reader.Peek(1) -- if len(f) > 0 { -- switch f[0] { -- case JSON_TRUE[0]: -- b 
:= make([]byte, len(JSON_TRUE)) -- _, err := p.reader.Read(b) -- if err != nil { -- return false, NewTProtocolException(err) -- } -- if string(b) == string(JSON_TRUE) { -- value = true -- } else { -- e := fmt.Errorf(""Expected \""true\"" but found: %s"", string(b)) -- return value, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- break -- case JSON_FALSE[0]: -- b := make([]byte, len(JSON_FALSE)) -- _, err := p.reader.Read(b) -- if err != nil { -- return false, NewTProtocolException(err) -- } -- if string(b) == string(JSON_FALSE) { -- value = false -- } else { -- e := fmt.Errorf(""Expected \""false\"" but found: %s"", string(b)) -- return value, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- break -- case JSON_NULL[0]: -- b := make([]byte, len(JSON_NULL)) -- _, err := p.reader.Read(b) -- if err != nil { -- return false, NewTProtocolException(err) -- } -- if string(b) == string(JSON_NULL) { -- value = false -- } else { -- e := fmt.Errorf(""Expected \""null\"" but found: %s"", string(b)) -- return value, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- default: -- e := fmt.Errorf(""Expected \""true\"", \""false\"", or \""null\"" but found: %s"", string(f)) -- return value, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } -- return value, p.ParsePostValue() --} -- --func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { -- v, err := p.ReadI64() -- return int8(v), err --} -- --func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { -- v, err := p.ReadI64() -- return int16(v), err --} -- --func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { -- v, err := p.ReadI64() -- return int32(v), err --} -- --func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { -- v, _, err := p.ParseI64() -- return v, err --} -- --func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { -- v, _, err := p.ParseF64() -- return v, err --} -- --func (p *TSimpleJSONProtocol) ReadString() (string, error) { -- var v string -- if err := p.ParsePreValue(); err != nil { -- return v, err -- } -- f, _ := p.reader.Peek(1) -- if len(f) > 0 && f[0] == JSON_QUOTE { -- p.reader.ReadByte() -- value, err := p.ParseStringBody() -- v = value -- if err != nil { -- return v, err -- } -- } else if len(f) > 0 && f[0] == JSON_NULL[0] { -- b := make([]byte, len(JSON_NULL)) -- _, err := p.reader.Read(b) -- if err != nil { -- return v, NewTProtocolException(err) -- } -- if string(b) != string(JSON_NULL) { -- e := fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(b)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } else { -- e := fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(f)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return v, p.ParsePostValue() --} -- --func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { -- var v []byte -- if err := p.ParsePreValue(); err != nil { -- return nil, err -- } -- f, _ := p.reader.Peek(1) -- if len(f) > 0 && f[0] == JSON_QUOTE { -- p.reader.ReadByte() -- value, err := p.ParseBase64EncodedBody() -- v = value -- if err != nil { -- return v, err -- } -- } else if len(f) > 0 && f[0] == JSON_NULL[0] { -- b := make([]byte, len(JSON_NULL)) -- _, err := p.reader.Read(b) -- if err != nil { -- return v, NewTProtocolException(err) -- } -- if string(b) != string(JSON_NULL) { -- e := fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(b)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } else { -- e := 
fmt.Errorf(""Expected a JSON string, found unquoted data started with %s"", string(f)) -- return v, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- -- return v, p.ParsePostValue() --} -- --func (p *TSimpleJSONProtocol) Flush() (err error) { -- return NewTProtocolException(p.writer.Flush()) --} -- --func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) { -- return SkipDefaultDepth(p, fieldType) --} -- --func (p *TSimpleJSONProtocol) Transport() TTransport { -- return p.trans --} -- --func (p *TSimpleJSONProtocol) OutputPreValue() error { -- cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) -- switch cxt { -- case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: -- if _, e := p.write(JSON_COMMA); e != nil { -- return NewTProtocolException(e) -- } -- break -- case _CONTEXT_IN_OBJECT_NEXT_VALUE: -- if _, e := p.write(JSON_COLON); e != nil { -- return NewTProtocolException(e) -- } -- break -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) OutputPostValue() error { -- cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) -- switch cxt { -- case _CONTEXT_IN_LIST_FIRST: -- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] -- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST)) -- break -- case _CONTEXT_IN_OBJECT_FIRST: -- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] -- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) -- break -- case _CONTEXT_IN_OBJECT_NEXT_KEY: -- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] -- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) -- break -- case _CONTEXT_IN_OBJECT_NEXT_VALUE: -- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] -- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) -- break -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) OutputBool(value bool) error { -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- var v string -- if value { -- v = string(JSON_TRUE) -- } else { -- v = string(JSON_FALSE) -- } -- switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { -- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: -- v = jsonQuote(v) -- default: -- } -- if e := p.OutputStringData(v); e != nil { -- return e -- } -- return p.OutputPostValue() --} -- --func (p *TSimpleJSONProtocol) OutputNull() error { -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- if _, e := p.write(JSON_NULL); e != nil { -- return NewTProtocolException(e) -- } -- return p.OutputPostValue() --} -- --func (p *TSimpleJSONProtocol) OutputF64(value float64) error { -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- var v string -- if math.IsNaN(value) { -- v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) -- } else if math.IsInf(value, 1) { -- v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) -- } else if math.IsInf(value, -1) { -- v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) -- } else { -- v = strconv.FormatFloat(value, 'g', -1, 64) -- switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { -- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: -- v = string(JSON_QUOTE) + v + string(JSON_QUOTE) -- default: -- } -- } -- if e := p.OutputStringData(v); e != nil { -- return e -- } -- return p.OutputPostValue() --} -- --func (p *TSimpleJSONProtocol) OutputI64(value int64) error { -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- v := strconv.FormatInt(value, 10) -- switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { -- case 
_CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: -- v = jsonQuote(v) -- default: -- } -- if e := p.OutputStringData(v); e != nil { -- return e -- } -- return p.OutputPostValue() --} -- --func (p *TSimpleJSONProtocol) OutputString(s string) error { -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- if e := p.OutputStringData(jsonQuote(s)); e != nil { -- return e -- } -- return p.OutputPostValue() --} -- --func (p *TSimpleJSONProtocol) OutputStringData(s string) error { -- _, e := p.write([]byte(s)) -- return NewTProtocolException(e) --} -- --func (p *TSimpleJSONProtocol) OutputObjectBegin() error { -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- if _, e := p.write(JSON_LBRACE); e != nil { -- return NewTProtocolException(e) -- } -- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) -- return nil --} -- --func (p *TSimpleJSONProtocol) OutputObjectEnd() error { -- if _, e := p.write(JSON_RBRACE); e != nil { -- return NewTProtocolException(e) -- } -- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] -- if e := p.OutputPostValue(); e != nil { -- return e -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) OutputListBegin() error { -- if e := p.OutputPreValue(); e != nil { -- return e -- } -- if _, e := p.write(JSON_LBRACKET); e != nil { -- return NewTProtocolException(e) -- } -- p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) -- return nil --} -- --func (p *TSimpleJSONProtocol) OutputListEnd() error { -- if _, e := p.write(JSON_RBRACKET); e != nil { -- return NewTProtocolException(e) -- } -- p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] -- if e := p.OutputPostValue(); e != nil { -- return e -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { -- if e := p.OutputListBegin(); e != nil { -- return e -- } -- if e := p.WriteByte(int8(elemType)); e != nil { -- return e -- } -- if e := p.WriteI64(int64(size)); e != nil { -- return e -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) ParsePreValue() error { -- if e := p.readNonSignificantWhitespace(); e != nil { -- return NewTProtocolException(e) -- } -- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) -- b, _ := p.reader.Peek(1) -- switch cxt { -- case _CONTEXT_IN_LIST: -- if len(b) > 0 { -- switch b[0] { -- case JSON_RBRACKET[0]: -- return nil -- case JSON_COMMA[0]: -- p.reader.ReadByte() -- if e := p.readNonSignificantWhitespace(); e != nil { -- return NewTProtocolException(e) -- } -- return nil -- default: -- e := fmt.Errorf(""Expected \""]\"" or \"",\"" in list context, but found \""%s\"""", string(b)) -- return NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } -- break -- case _CONTEXT_IN_OBJECT_NEXT_KEY: -- if len(b) > 0 { -- switch b[0] { -- case JSON_RBRACE[0]: -- return nil -- case JSON_COMMA[0]: -- p.reader.ReadByte() -- if e := p.readNonSignificantWhitespace(); e != nil { -- return NewTProtocolException(e) -- } -- return nil -- default: -- e := fmt.Errorf(""Expected \""}\"" or \"",\"" in object context, but found \""%s\"""", string(b)) -- return NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } -- break -- case _CONTEXT_IN_OBJECT_NEXT_VALUE: -- if len(b) > 0 { -- switch b[0] { -- case JSON_COLON[0]: -- p.reader.ReadByte() -- if e := p.readNonSignificantWhitespace(); e != nil { -- return NewTProtocolException(e) -- } -- return nil -- default: -- e := fmt.Errorf(""Expected \"":\"" in object context, but found \""%s\"""", string(b)) -- return 
NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } -- break -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) ParsePostValue() error { -- if e := p.readNonSignificantWhitespace(); e != nil { -- return NewTProtocolException(e) -- } -- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) -- switch cxt { -- case _CONTEXT_IN_LIST_FIRST: -- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] -- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) -- break -- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: -- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] -- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) -- break -- case _CONTEXT_IN_OBJECT_NEXT_VALUE: -- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] -- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) -- break -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { -- for { -- b, _ := p.reader.Peek(1) -- if len(b) < 1 { -- return nil -- } -- switch b[0] { -- case ' ', '\r', '\n', '\t': -- p.reader.ReadByte() -- continue -- default: -- break -- } -- break -- } -- return nil --} -- --func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { -- line, err := p.reader.ReadString(JSON_QUOTE) -- if err != nil { -- return """", NewTProtocolException(err) -- } -- l := len(line) -- // count number of escapes to see if we need to keep going -- i := 1 -- for ; i < l; i++ { -- if line[l-i-1] != '\\' { -- break -- } -- } -- if i&0x01 == 1 { -- v, ok := jsonUnquote(string(JSON_QUOTE) + line) -- if !ok { -- return """", NewTProtocolException(err) -- } -- return v, nil -- } -- s, err := p.ParseQuotedStringBody() -- if err != nil { -- return """", NewTProtocolException(err) -- } -- str := string(JSON_QUOTE) + line + s -- v, ok := jsonUnquote(str) -- if !ok { -- e := fmt.Errorf(""Unable to parse as JSON string %s"", str) -- return """", NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return v, nil --} -- --func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { -- line, err := p.reader.ReadString(JSON_QUOTE) -- if err != nil { -- return """", NewTProtocolException(err) -- } -- l := len(line) -- // count number of escapes to see if we need to keep going -- i := 1 -- for ; i < l; i++ { -- if line[l-i-1] != '\\' { -- break -- } -- } -- if i&0x01 == 1 { -- return line, nil -- } -- s, err := p.ParseQuotedStringBody() -- if err != nil { -- return """", NewTProtocolException(err) -- } -- v := line + s -- return v, nil --} -- --func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { -- line, err := p.reader.ReadBytes(JSON_QUOTE) -- if err != nil { -- return line, NewTProtocolException(err) -- } -- line2 := line[0 : len(line)-1] -- l := len(line2) -- if (l % 4) != 0 { -- pad := 4 - (l % 4) -- fill := [...]byte{'=', '=', '='} -- line2 = append(line2, fill[:pad]...) 
-- l = len(line2) -- } -- output := make([]byte, base64.StdEncoding.DecodedLen(l)) -- n, err := base64.StdEncoding.Decode(output, line2) -- return output[0:n], NewTProtocolException(err) --} -- --func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { -- if err := p.ParsePreValue(); err != nil { -- return 0, false, err -- } -- var value int64 -- var isnull bool -- if p.safePeekContains(JSON_NULL) { -- p.reader.Read(make([]byte, len(JSON_NULL))) -- isnull = true -- } else { -- num, err := p.readNumeric() -- isnull = (num == nil) -- if !isnull { -- value = num.Int64() -- } -- if err != nil { -- return value, isnull, err -- } -- } -- return value, isnull, p.ParsePostValue() --} -- --func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { -- if err := p.ParsePreValue(); err != nil { -- return 0, false, err -- } -- var value float64 -- var isnull bool -- if p.safePeekContains(JSON_NULL) { -- p.reader.Read(make([]byte, len(JSON_NULL))) -- isnull = true -- } else { -- num, err := p.readNumeric() -- isnull = (num == nil) -- if !isnull { -- value = num.Float64() -- } -- if err != nil { -- return value, isnull, err -- } -- } -- return value, isnull, p.ParsePostValue() --} -- --func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { -- if err := p.ParsePreValue(); err != nil { -- return false, err -- } -- var b []byte -- b, err := p.reader.Peek(1) -- if err != nil { -- return false, err -- } -- if len(b) > 0 && b[0] == JSON_LBRACE[0] { -- p.reader.ReadByte() -- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) -- return false, nil -- } else if p.safePeekContains(JSON_NULL) { -- return true, nil -- } -- e := fmt.Errorf(""Expected '{' or null, but found '%s'"", string(b)) -- return false, NewTProtocolExceptionWithType(INVALID_DATA, e) --} -- --func (p *TSimpleJSONProtocol) ParseObjectEnd() error { -- if isNull, err := p.readIfNull(); isNull || err != nil { -- return err -- } -- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) -- if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { -- e := fmt.Errorf(""Expected to be in the Object Context, but not in Object Context (%d)"", cxt) -- return NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- line, err := p.reader.ReadString(JSON_RBRACE[0]) -- if err != nil { -- return NewTProtocolException(err) -- } -- for _, char := range line { -- switch char { -- default: -- e := fmt.Errorf(""Expecting end of object \""}\"", but found: \""%s\"""", line) -- return NewTProtocolExceptionWithType(INVALID_DATA, e) -- case ' ', '\n', '\r', '\t', '}': -- break -- } -- } -- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] -- return p.ParsePostValue() --} -- --func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { -- if e := p.ParsePreValue(); e != nil { -- return false, e -- } -- var b []byte -- b, err = p.reader.Peek(1) -- if err != nil { -- return false, err -- } -- if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { -- p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) -- p.reader.ReadByte() -- isNull = false -- } else if p.safePeekContains(JSON_NULL) { -- isNull = true -- } else { -- err = fmt.Errorf(""Expected \""null\"" or \""[\"", received %q"", b) -- } -- return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) --} -- --func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { -- if isNull, e := p.ParseListBegin(); isNull || e != nil { -- return VOID, 0, e -- } -- 
bElemType, err := p.ReadByte() -- elemType = TType(bElemType) -- if err != nil { -- return elemType, size, err -- } -- nSize, err2 := p.ReadI64() -- size = int(nSize) -- return elemType, size, err2 --} -- --func (p *TSimpleJSONProtocol) ParseListEnd() error { -- if isNull, err := p.readIfNull(); isNull || err != nil { -- return err -- } -- cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) -- if cxt != _CONTEXT_IN_LIST { -- e := fmt.Errorf(""Expected to be in the List Context, but not in List Context (%d)"", cxt) -- return NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- line, err := p.reader.ReadString(JSON_RBRACKET[0]) -- if err != nil { -- return NewTProtocolException(err) -- } -- for _, char := range line { -- switch char { -- default: -- e := fmt.Errorf(""Expecting end of list \""]\"", but found: \"""", line, ""\"""") -- return NewTProtocolExceptionWithType(INVALID_DATA, e) -- case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): -- break -- } -- } -- p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] -- if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { -- return nil -- } -- return p.ParsePostValue() --} -- --func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { -- e := p.readNonSignificantWhitespace() -- if e != nil { -- return nil, VOID, NewTProtocolException(e) -- } -- b, e := p.reader.Peek(1) -- if len(b) > 0 { -- c := b[0] -- switch c { -- case JSON_NULL[0]: -- buf := make([]byte, len(JSON_NULL)) -- _, e := p.reader.Read(buf) -- if e != nil { -- return nil, VOID, NewTProtocolException(e) -- } -- if string(JSON_NULL) != string(buf) { -- e = mismatch(string(JSON_NULL), string(buf)) -- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return nil, VOID, nil -- case JSON_QUOTE: -- p.reader.ReadByte() -- v, e := p.ParseStringBody() -- if e != nil { -- return v, UTF8, NewTProtocolException(e) -- } -- if v == JSON_INFINITY { -- return INFINITY, DOUBLE, nil -- } else if v == JSON_NEGATIVE_INFINITY { -- return NEGATIVE_INFINITY, DOUBLE, nil -- } else if v == JSON_NAN { -- return NAN, DOUBLE, nil -- } -- return v, UTF8, nil -- case JSON_TRUE[0]: -- buf := make([]byte, len(JSON_TRUE)) -- _, e := p.reader.Read(buf) -- if e != nil { -- return true, BOOL, NewTProtocolException(e) -- } -- if string(JSON_TRUE) != string(buf) { -- e := mismatch(string(JSON_TRUE), string(buf)) -- return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return true, BOOL, nil -- case JSON_FALSE[0]: -- buf := make([]byte, len(JSON_FALSE)) -- _, e := p.reader.Read(buf) -- if e != nil { -- return false, BOOL, NewTProtocolException(e) -- } -- if string(JSON_FALSE) != string(buf) { -- e := mismatch(string(JSON_FALSE), string(buf)) -- return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return false, BOOL, nil -- case JSON_LBRACKET[0]: -- _, e := p.reader.ReadByte() -- return make([]interface{}, 0), LIST, NewTProtocolException(e) -- case JSON_LBRACE[0]: -- _, e := p.reader.ReadByte() -- return make(map[string]interface{}), STRUCT, NewTProtocolException(e) -- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: -- // assume numeric -- v, e := p.readNumeric() -- return v, DOUBLE, e -- default: -- e := fmt.Errorf(""Expected element in list but found '%s' while parsing JSON."", string(c)) -- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } -- e = fmt.Errorf(""Cannot read a 
single element while parsing JSON."") -- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) -- --} -- --func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { -- cont := true -- for cont { -- b, _ := p.reader.Peek(1) -- if len(b) < 1 { -- return false, nil -- } -- switch b[0] { -- default: -- return false, nil -- case JSON_NULL[0]: -- cont = false -- break -- case ' ', '\n', '\r', '\t': -- p.reader.ReadByte() -- break -- } -- } -- if p.safePeekContains(JSON_NULL) { -- p.reader.Read(make([]byte, len(JSON_NULL))) -- return true, nil -- } -- return false, nil --} -- --func (p *TSimpleJSONProtocol) readQuoteIfNext() { -- b, _ := p.reader.Peek(1) -- if len(b) > 0 && b[0] == JSON_QUOTE { -- p.reader.ReadByte() -- } --} -- --func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { -- isNull, err := p.readIfNull() -- if isNull || err != nil { -- return NUMERIC_NULL, err -- } -- hasDecimalPoint := false -- nextCanBeSign := true -- hasE := false -- MAX_LEN := 40 -- buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) -- continueFor := true -- inQuotes := false -- for continueFor { -- c, err := p.reader.ReadByte() -- if err != nil { -- if err == io.EOF { -- break -- } -- return NUMERIC_NULL, NewTProtocolException(err) -- } -- switch c { -- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': -- buf.WriteByte(c) -- nextCanBeSign = false -- case '.': -- if hasDecimalPoint { -- e := fmt.Errorf(""Unable to parse number with multiple decimal points '%s.'"", buf.String()) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- if hasE { -- e := fmt.Errorf(""Unable to parse number with decimal points in the exponent '%s.'"", buf.String()) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- buf.WriteByte(c) -- hasDecimalPoint, nextCanBeSign = true, false -- case 'e', 'E': -- if hasE { -- e := fmt.Errorf(""Unable to parse number with multiple exponents '%s%c'"", buf.String(), c) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- buf.WriteByte(c) -- hasE, nextCanBeSign = true, true -- case '-', '+': -- if !nextCanBeSign { -- e := fmt.Errorf(""Negative sign within number"") -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- buf.WriteByte(c) -- nextCanBeSign = false -- case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: -- p.reader.UnreadByte() -- continueFor = false -- case JSON_NAN[0]: -- if buf.Len() == 0 { -- buffer := make([]byte, len(JSON_NAN)) -- buffer[0] = c -- _, e := p.reader.Read(buffer[1:]) -- if e != nil { -- return NUMERIC_NULL, NewTProtocolException(e) -- } -- if JSON_NAN != string(buffer) { -- e := mismatch(JSON_NAN, string(buffer)) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- if inQuotes { -- p.readQuoteIfNext() -- } -- return NAN, nil -- } else { -- e := fmt.Errorf(""Unable to parse number starting with character '%c'"", c) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- case JSON_INFINITY[0]: -- if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { -- buffer := make([]byte, len(JSON_INFINITY)) -- buffer[0] = c -- _, e := p.reader.Read(buffer[1:]) -- if e != nil { -- return NUMERIC_NULL, NewTProtocolException(e) -- } -- if JSON_INFINITY != string(buffer) { -- e := mismatch(JSON_INFINITY, string(buffer)) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- if inQuotes { -- p.readQuoteIfNext() -- } -- return INFINITY, nil -- } 
else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { -- buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) -- buffer[0] = JSON_NEGATIVE_INFINITY[0] -- buffer[1] = c -- _, e := p.reader.Read(buffer[2:]) -- if e != nil { -- return NUMERIC_NULL, NewTProtocolException(e) -- } -- if JSON_NEGATIVE_INFINITY != string(buffer) { -- e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- if inQuotes { -- p.readQuoteIfNext() -- } -- return NEGATIVE_INFINITY, nil -- } else { -- e := fmt.Errorf(""Unable to parse number starting with character '%c' due to existing buffer %s"", c, buf.String()) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- case JSON_QUOTE: -- if !inQuotes { -- inQuotes = true -- } else { -- break -- } -- default: -- e := fmt.Errorf(""Unable to parse number starting with character '%c'"", c) -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- } -- if buf.Len() == 0 { -- e := fmt.Errorf(""Unable to parse number from empty string ''"") -- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) -- } -- return NewNumericFromJSONString(buf.String(), false), nil --} -- --// Safely peeks into the buffer, reading only what is necessary --func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { -- for i := 0; i < len(b); i++ { -- a, _ := p.reader.Peek(i + 1) -- if len(a) == 0 || a[i] != b[i] { -- return false -- } -- } -- return true --} -- --// Reset the context stack to its initial state. --func (p *TSimpleJSONProtocol) resetContextStack() { -- p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} -- p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} --} -- --func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { -- n, err := p.writer.Write(b) -- if err != nil { -- p.writer.Reset(p.trans) // THRIFT-3735 -- } -- return n, err --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go -deleted file mode 100644 -index 37081bd835b56..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go -+++ /dev/null -@@ -1,215 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""log"" -- ""runtime/debug"" -- ""sync"" -- ""sync/atomic"" --) -- --/* -- * This is not a typical TSimpleServer as it is not blocked after accept a socket. -- * It is more like a TThreadedServer that can handle different connections in different goroutines. -- * This will work if golang user implements a conn-pool like thing in client side. 
-- */ --type TSimpleServer struct { -- closed int32 -- wg sync.WaitGroup -- mu sync.Mutex -- -- processorFactory TProcessorFactory -- serverTransport TServerTransport -- inputTransportFactory TTransportFactory -- outputTransportFactory TTransportFactory -- inputProtocolFactory TProtocolFactory -- outputProtocolFactory TProtocolFactory --} -- --func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { -- return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) --} -- --func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { -- return NewTSimpleServerFactory4(NewTProcessorFactory(processor), -- serverTransport, -- transportFactory, -- protocolFactory, -- ) --} -- --func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { -- return NewTSimpleServerFactory6(NewTProcessorFactory(processor), -- serverTransport, -- inputTransportFactory, -- outputTransportFactory, -- inputProtocolFactory, -- outputProtocolFactory, -- ) --} -- --func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { -- return NewTSimpleServerFactory6(processorFactory, -- serverTransport, -- NewTTransportFactory(), -- NewTTransportFactory(), -- NewTBinaryProtocolFactoryDefault(), -- NewTBinaryProtocolFactoryDefault(), -- ) --} -- --func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { -- return NewTSimpleServerFactory6(processorFactory, -- serverTransport, -- transportFactory, -- transportFactory, -- protocolFactory, -- protocolFactory, -- ) --} -- --func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { -- return &TSimpleServer{ -- processorFactory: processorFactory, -- serverTransport: serverTransport, -- inputTransportFactory: inputTransportFactory, -- outputTransportFactory: outputTransportFactory, -- inputProtocolFactory: inputProtocolFactory, -- outputProtocolFactory: outputProtocolFactory, -- } --} -- --func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { -- return p.processorFactory --} -- --func (p *TSimpleServer) ServerTransport() TServerTransport { -- return p.serverTransport --} -- --func (p *TSimpleServer) InputTransportFactory() TTransportFactory { -- return p.inputTransportFactory --} -- --func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { -- return p.outputTransportFactory --} -- --func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { -- return p.inputProtocolFactory --} -- --func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { -- return p.outputProtocolFactory --} -- --func (p *TSimpleServer) Listen() error { -- return p.serverTransport.Listen() --} -- --func (p *TSimpleServer) AcceptLoop() error { -- for { -- client, err := p.serverTransport.Accept() -- p.mu.Lock() -- if atomic.LoadInt32(&p.closed) != 0 { -- return nil -- } -- if err != nil { -- return err -- } -- if client != nil { 
-- p.wg.Add(1) -- go func() { -- defer p.wg.Done() -- if err := p.processRequests(client); err != nil { -- log.Println(""error processing request:"", err) -- } -- }() -- } -- p.mu.Unlock() -- } --} -- --func (p *TSimpleServer) Serve() error { -- err := p.Listen() -- if err != nil { -- return err -- } -- p.AcceptLoop() -- return nil --} -- --func (p *TSimpleServer) Stop() error { -- p.mu.Lock() -- defer p.mu.Unlock() -- if atomic.LoadInt32(&p.closed) != 0 { -- return nil -- } -- atomic.StoreInt32(&p.closed, 1) -- p.serverTransport.Interrupt() -- p.wg.Wait() -- return nil --} -- --func (p *TSimpleServer) processRequests(client TTransport) error { -- processor := p.processorFactory.GetProcessor(client) -- inputTransport, err := p.inputTransportFactory.GetTransport(client) -- if err != nil { -- return err -- } -- outputTransport, err := p.outputTransportFactory.GetTransport(client) -- if err != nil { -- return err -- } -- inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) -- outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport) -- defer func() { -- if e := recover(); e != nil { -- log.Printf(""panic in processor: %s: %s"", e, debug.Stack()) -- } -- }() -- -- if inputTransport != nil { -- defer inputTransport.Close() -- } -- if outputTransport != nil { -- defer outputTransport.Close() -- } -- for { -- if atomic.LoadInt32(&p.closed) != 0 { -- return nil -- } -- -- ok, err := processor.Process(defaultCtx, inputProtocol, outputProtocol) -- if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE { -- return nil -- } else if err != nil { -- return err -- } -- if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD { -- continue -- } -- if !ok { -- break -- } -- } -- return nil --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go -deleted file mode 100644 -index 383b1fe3e973c..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/socket.go -+++ /dev/null -@@ -1,165 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. 
-- */ -- --package thrift -- --import ( -- ""net"" -- ""time"" --) -- --type TSocket struct { -- conn net.Conn -- addr net.Addr -- timeout time.Duration --} -- --// NewTSocket creates a net.Conn-backed TTransport, given a host and port --// --// Example: --// trans, err := thrift.NewTSocket(""localhost:9090"") --func NewTSocket(hostPort string) (*TSocket, error) { -- return NewTSocketTimeout(hostPort, 0) --} -- --// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port --// it also accepts a timeout as a time.Duration --func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) { -- //conn, err := net.DialTimeout(network, address, timeout) -- addr, err := net.ResolveTCPAddr(""tcp"", hostPort) -- if err != nil { -- return nil, err -- } -- return NewTSocketFromAddrTimeout(addr, timeout), nil --} -- --// Creates a TSocket from a net.Addr --func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket { -- return &TSocket{addr: addr, timeout: timeout} --} -- --// Creates a TSocket from an existing net.Conn --func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket { -- return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout} --} -- --// Sets the socket timeout --func (p *TSocket) SetTimeout(timeout time.Duration) error { -- p.timeout = timeout -- return nil --} -- --func (p *TSocket) pushDeadline(read, write bool) { -- var t time.Time -- if p.timeout > 0 { -- t = time.Now().Add(time.Duration(p.timeout)) -- } -- if read && write { -- p.conn.SetDeadline(t) -- } else if read { -- p.conn.SetReadDeadline(t) -- } else if write { -- p.conn.SetWriteDeadline(t) -- } --} -- --// Connects the socket, creating a new socket object if necessary. --func (p *TSocket) Open() error { -- if p.IsOpen() { -- return NewTTransportException(ALREADY_OPEN, ""Socket already connected."") -- } -- if p.addr == nil { -- return NewTTransportException(NOT_OPEN, ""Cannot open nil address."") -- } -- if len(p.addr.Network()) == 0 { -- return NewTTransportException(NOT_OPEN, ""Cannot open bad network name."") -- } -- if len(p.addr.String()) == 0 { -- return NewTTransportException(NOT_OPEN, ""Cannot open bad address."") -- } -- var err error -- if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil { -- return NewTTransportException(NOT_OPEN, err.Error()) -- } -- return nil --} -- --// Retrieve the underlying net.Conn --func (p *TSocket) Conn() net.Conn { -- return p.conn --} -- --// Returns true if the connection is open --func (p *TSocket) IsOpen() bool { -- if p.conn == nil { -- return false -- } -- return true --} -- --// Closes the socket. --func (p *TSocket) Close() error { -- // Close the socket -- if p.conn != nil { -- err := p.conn.Close() -- if err != nil { -- return err -- } -- p.conn = nil -- } -- return nil --} -- --//Returns the remote address of the socket. 
--func (p *TSocket) Addr() net.Addr { -- return p.addr --} -- --func (p *TSocket) Read(buf []byte) (int, error) { -- if !p.IsOpen() { -- return 0, NewTTransportException(NOT_OPEN, ""Connection not open"") -- } -- p.pushDeadline(true, false) -- n, err := p.conn.Read(buf) -- return n, NewTTransportExceptionFromError(err) --} -- --func (p *TSocket) Write(buf []byte) (int, error) { -- if !p.IsOpen() { -- return 0, NewTTransportException(NOT_OPEN, ""Connection not open"") -- } -- p.pushDeadline(false, true) -- return p.conn.Write(buf) --} -- --func (p *TSocket) Flush() error { -- return nil --} -- --func (p *TSocket) Interrupt() error { -- if !p.IsOpen() { -- return nil -- } -- return p.conn.Close() --} -- --func (p *TSocket) RemainingBytes() (num_bytes uint64) { -- const maxSize = ^uint64(0) -- return maxSize // the thruth is, we just don't know unless framed is used --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go -deleted file mode 100644 -index 907afca326fe0..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go -+++ /dev/null -@@ -1,112 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""crypto/tls"" -- ""net"" -- ""time"" --) -- --type TSSLServerSocket struct { -- listener net.Listener -- addr net.Addr -- clientTimeout time.Duration -- interrupted bool -- cfg *tls.Config --} -- --func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { -- return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) --} -- --func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { -- if cfg.MinVersion == 0 { -- cfg.MinVersion = tls.VersionTLS10 -- } -- addr, err := net.ResolveTCPAddr(""tcp"", listenAddr) -- if err != nil { -- return nil, err -- } -- return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil --} -- --func (p *TSSLServerSocket) Listen() error { -- if p.IsListening() { -- return nil -- } -- l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) -- if err != nil { -- return err -- } -- p.listener = l -- return nil --} -- --func (p *TSSLServerSocket) Accept() (TTransport, error) { -- if p.interrupted { -- return nil, errTransportInterrupted -- } -- if p.listener == nil { -- return nil, NewTTransportException(NOT_OPEN, ""No underlying server socket"") -- } -- conn, err := p.listener.Accept() -- if err != nil { -- return nil, NewTTransportExceptionFromError(err) -- } -- return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil --} -- --// Checks whether the socket is listening. 
--func (p *TSSLServerSocket) IsListening() bool { -- return p.listener != nil --} -- --// Connects the socket, creating a new socket object if necessary. --func (p *TSSLServerSocket) Open() error { -- if p.IsListening() { -- return NewTTransportException(ALREADY_OPEN, ""Server socket already open"") -- } -- if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { -- return err -- } else { -- p.listener = l -- } -- return nil --} -- --func (p *TSSLServerSocket) Addr() net.Addr { -- return p.addr --} -- --func (p *TSSLServerSocket) Close() error { -- defer func() { -- p.listener = nil -- }() -- if p.IsListening() { -- return p.listener.Close() -- } -- return nil --} -- --func (p *TSSLServerSocket) Interrupt() error { -- p.interrupted = true -- return nil --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go -deleted file mode 100644 -index c3bd72cc467eb..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go -+++ /dev/null -@@ -1,175 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""crypto/tls"" -- ""net"" -- ""time"" --) -- --type TSSLSocket struct { -- conn net.Conn -- // hostPort contains host:port (e.g. ""asdf.com:12345""). The field is -- // only valid if addr is nil. -- hostPort string -- // addr is nil when hostPort is not """", and is only used when the -- // TSSLSocket is constructed from a net.Addr. 
-- addr net.Addr -- timeout time.Duration -- cfg *tls.Config --} -- --// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration --// --// Example: --// trans, err := thrift.NewTSSLSocket(""localhost:9090"", nil) --func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { -- return NewTSSLSocketTimeout(hostPort, cfg, 0) --} -- --// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port --// it also accepts a tls Configuration and a timeout as a time.Duration --func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) { -- if cfg.MinVersion == 0 { -- cfg.MinVersion = tls.VersionTLS10 -- } -- return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil --} -- --// Creates a TSSLSocket from a net.Addr --func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket { -- return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg} --} -- --// Creates a TSSLSocket from an existing net.Conn --func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket { -- return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg} --} -- --// Sets the socket timeout --func (p *TSSLSocket) SetTimeout(timeout time.Duration) error { -- p.timeout = timeout -- return nil --} -- --func (p *TSSLSocket) pushDeadline(read, write bool) { -- var t time.Time -- if p.timeout > 0 { -- t = time.Now().Add(time.Duration(p.timeout)) -- } -- if read && write { -- p.conn.SetDeadline(t) -- } else if read { -- p.conn.SetReadDeadline(t) -- } else if write { -- p.conn.SetWriteDeadline(t) -- } --} -- --// Connects the socket, creating a new socket object if necessary. --func (p *TSSLSocket) Open() error { -- var err error -- // If we have a hostname, we need to pass the hostname to tls.Dial for -- // certificate hostname checks. -- if p.hostPort != """" { -- if p.conn, err = tls.DialWithDialer(&net.Dialer{ -- Timeout: p.timeout}, ""tcp"", p.hostPort, p.cfg); err != nil { -- return NewTTransportException(NOT_OPEN, err.Error()) -- } -- } else { -- if p.IsOpen() { -- return NewTTransportException(ALREADY_OPEN, ""Socket already connected."") -- } -- if p.addr == nil { -- return NewTTransportException(NOT_OPEN, ""Cannot open nil address."") -- } -- if len(p.addr.Network()) == 0 { -- return NewTTransportException(NOT_OPEN, ""Cannot open bad network name."") -- } -- if len(p.addr.String()) == 0 { -- return NewTTransportException(NOT_OPEN, ""Cannot open bad address."") -- } -- if p.conn, err = tls.DialWithDialer(&net.Dialer{ -- Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil { -- return NewTTransportException(NOT_OPEN, err.Error()) -- } -- } -- return nil --} -- --// Retrieve the underlying net.Conn --func (p *TSSLSocket) Conn() net.Conn { -- return p.conn --} -- --// Returns true if the connection is open --func (p *TSSLSocket) IsOpen() bool { -- if p.conn == nil { -- return false -- } -- return true --} -- --// Closes the socket. 
--func (p *TSSLSocket) Close() error { -- // Close the socket -- if p.conn != nil { -- err := p.conn.Close() -- if err != nil { -- return err -- } -- p.conn = nil -- } -- return nil --} -- --func (p *TSSLSocket) Read(buf []byte) (int, error) { -- if !p.IsOpen() { -- return 0, NewTTransportException(NOT_OPEN, ""Connection not open"") -- } -- p.pushDeadline(true, false) -- n, err := p.conn.Read(buf) -- return n, NewTTransportExceptionFromError(err) --} -- --func (p *TSSLSocket) Write(buf []byte) (int, error) { -- if !p.IsOpen() { -- return 0, NewTTransportException(NOT_OPEN, ""Connection not open"") -- } -- p.pushDeadline(false, true) -- return p.conn.Write(buf) --} -- --func (p *TSSLSocket) Flush() error { -- return nil --} -- --func (p *TSSLSocket) Interrupt() error { -- if !p.IsOpen() { -- return nil -- } -- return p.conn.Close() --} -- --func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { -- const maxSize = ^uint64(0) -- return maxSize // the thruth is, we just don't know unless framed is used --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go -deleted file mode 100644 -index 70a85a848958f..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/transport.go -+++ /dev/null -@@ -1,65 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""errors"" -- ""io"" --) -- --var errTransportInterrupted = errors.New(""Transport Interrupted"") -- --type Flusher interface { -- Flush() (err error) --} -- --type ReadSizeProvider interface { -- RemainingBytes() (num_bytes uint64) --} -- --// Encapsulates the I/O layer --type TTransport interface { -- io.ReadWriteCloser -- Flusher -- ReadSizeProvider -- -- // Opens the transport for communication -- Open() error -- -- // Returns true if the transport is open -- IsOpen() bool --} -- --type stringWriter interface { -- WriteString(s string) (n int, err error) --} -- --// This is ""enchanced"" transport with extra capabilities. You need to use one of these --// to construct protocol. --// Notably, TSocket does not implement this interface, and it is always a mistake to use --// TSocket directly in protocol. 
--type TRichTransport interface { -- io.ReadWriter -- io.ByteReader -- io.ByteWriter -- stringWriter -- Flusher -- ReadSizeProvider --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go -deleted file mode 100644 -index 9505b44612d05..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go -+++ /dev/null -@@ -1,90 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --import ( -- ""errors"" -- ""io"" --) -- --type timeoutable interface { -- Timeout() bool --} -- --// Thrift Transport exception --type TTransportException interface { -- TException -- TypeId() int -- Err() error --} -- --const ( -- UNKNOWN_TRANSPORT_EXCEPTION = 0 -- NOT_OPEN = 1 -- ALREADY_OPEN = 2 -- TIMED_OUT = 3 -- END_OF_FILE = 4 --) -- --type tTransportException struct { -- typeId int -- err error --} -- --func (p *tTransportException) TypeId() int { -- return p.typeId --} -- --func (p *tTransportException) Error() string { -- return p.err.Error() --} -- --func (p *tTransportException) Err() error { -- return p.err --} -- --func NewTTransportException(t int, e string) TTransportException { -- return &tTransportException{typeId: t, err: errors.New(e)} --} -- --func NewTTransportExceptionFromError(e error) TTransportException { -- if e == nil { -- return nil -- } -- -- if t, ok := e.(TTransportException); ok { -- return t -- } -- -- switch v := e.(type) { -- case TTransportException: -- return v -- case timeoutable: -- if v.Timeout() { -- return &tTransportException{typeId: TIMED_OUT, err: e} -- } -- } -- -- if e == io.EOF { -- return &tTransportException{typeId: END_OF_FILE, err: e} -- } -- -- return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go -deleted file mode 100644 -index c805807940a13..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go -+++ /dev/null -@@ -1,39 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. 
You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --// Factory class used to create wrapped instance of Transports. --// This is used primarily in servers, which get Transports from --// a ServerTransport and then may want to mutate them (i.e. create --// a BufferedTransport from the underlying base transport) --type TTransportFactory interface { -- GetTransport(trans TTransport) (TTransport, error) --} -- --type tTransportFactory struct{} -- --// Return a wrapped instance of the base Transport. --func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { -- return trans, nil --} -- --func NewTTransportFactory() TTransportFactory { -- return &tTransportFactory{} --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/type.go b/vendor/github.com/apache/thrift/lib/go/thrift/type.go -deleted file mode 100644 -index 4292ffcadb130..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/type.go -+++ /dev/null -@@ -1,69 +0,0 @@ --/* -- * Licensed to the Apache Software Foundation (ASF) under one -- * or more contributor license agreements. See the NOTICE file -- * distributed with this work for additional information -- * regarding copyright ownership. The ASF licenses this file -- * to you under the Apache License, Version 2.0 (the -- * ""License""); you may not use this file except in compliance -- * with the License. You may obtain a copy of the License at -- * -- * http://www.apache.org/licenses/LICENSE-2.0 -- * -- * Unless required by applicable law or agreed to in writing, -- * software distributed under the License is distributed on an -- * ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -- * KIND, either express or implied. See the License for the -- * specific language governing permissions and limitations -- * under the License. -- */ -- --package thrift -- --// Type constants in the Thrift protocol --type TType byte -- --const ( -- STOP = 0 -- VOID = 1 -- BOOL = 2 -- BYTE = 3 -- I08 = 3 -- DOUBLE = 4 -- I16 = 6 -- I32 = 8 -- I64 = 10 -- STRING = 11 -- UTF7 = 11 -- STRUCT = 12 -- MAP = 13 -- SET = 14 -- LIST = 15 -- UTF8 = 16 -- UTF16 = 17 -- //BINARY = 18 wrong and unusued --) -- --var typeNames = map[int]string{ -- STOP: ""STOP"", -- VOID: ""VOID"", -- BOOL: ""BOOL"", -- BYTE: ""BYTE"", -- DOUBLE: ""DOUBLE"", -- I16: ""I16"", -- I32: ""I32"", -- I64: ""I64"", -- STRING: ""STRING"", -- STRUCT: ""STRUCT"", -- MAP: ""MAP"", -- SET: ""SET"", -- LIST: ""LIST"", -- UTF8: ""UTF8"", -- UTF16: ""UTF16"", --} -- --func (p TType) String() string { -- if s, ok := typeNames[int(p)]; ok { -- return s -- } -- return ""Unknown"" --} -diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go -deleted file mode 100644 -index f2f073222171f..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go -+++ /dev/null -@@ -1,131 +0,0 @@ --/* --* Licensed to the Apache Software Foundation (ASF) under one --* or more contributor license agreements. 
See the NOTICE file --* distributed with this work for additional information --* regarding copyright ownership. The ASF licenses this file --* to you under the Apache License, Version 2.0 (the --* ""License""); you may not use this file except in compliance --* with the License. You may obtain a copy of the License at --* --* http://www.apache.org/licenses/LICENSE-2.0 --* --* Unless required by applicable law or agreed to in writing, --* software distributed under the License is distributed on an --* ""AS IS"" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --* KIND, either express or implied. See the License for the --* specific language governing permissions and limitations --* under the License. -- */ -- --package thrift -- --import ( -- ""compress/zlib"" -- ""io"" -- ""log"" --) -- --// TZlibTransportFactory is a factory for TZlibTransport instances --type TZlibTransportFactory struct { -- level int -- factory TTransportFactory --} -- --// TZlibTransport is a TTransport implementation that makes use of zlib compression. --type TZlibTransport struct { -- reader io.ReadCloser -- transport TTransport -- writer *zlib.Writer --} -- --// GetTransport constructs a new instance of NewTZlibTransport --func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { -- if p.factory != nil { -- // wrap other factory -- var err error -- trans, err = p.factory.GetTransport(trans) -- if err != nil { -- return nil, err -- } -- } -- return NewTZlibTransport(trans, p.level) --} -- --// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory --func NewTZlibTransportFactory(level int) *TZlibTransportFactory { -- return &TZlibTransportFactory{level: level, factory: nil} --} -- --// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory --// as a wrapper over existing transport factory --func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { -- return &TZlibTransportFactory{level: level, factory: factory} --} -- --// NewTZlibTransport constructs a new instance of TZlibTransport --func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { -- w, err := zlib.NewWriterLevel(trans, level) -- if err != nil { -- log.Println(err) -- return nil, err -- } -- -- return &TZlibTransport{ -- writer: w, -- transport: trans, -- }, nil --} -- --// Close closes the reader and writer (flushing any unwritten data) and closes --// the underlying transport. --func (z *TZlibTransport) Close() error { -- if z.reader != nil { -- if err := z.reader.Close(); err != nil { -- return err -- } -- } -- if err := z.writer.Close(); err != nil { -- return err -- } -- return z.transport.Close() --} -- --// Flush flushes the writer and its underlying transport. --func (z *TZlibTransport) Flush() error { -- if err := z.writer.Flush(); err != nil { -- return err -- } -- return z.transport.Flush() --} -- --// IsOpen returns true if the transport is open --func (z *TZlibTransport) IsOpen() bool { -- return z.transport.IsOpen() --} -- --// Open opens the transport for communication --func (z *TZlibTransport) Open() error { -- return z.transport.Open() --} -- --func (z *TZlibTransport) Read(p []byte) (int, error) { -- if z.reader == nil { -- r, err := zlib.NewReader(z.transport) -- if err != nil { -- return 0, NewTTransportExceptionFromError(err) -- } -- z.reader = r -- } -- -- return z.reader.Read(p) --} -- --// RemainingBytes returns the size in bytes of the data that is still to be --// read. 
--func (z *TZlibTransport) RemainingBytes() uint64 { -- return z.transport.RemainingBytes() --} -- --func (z *TZlibTransport) Write(p []byte) (int, error) { -- return z.writer.Write(p) --} -diff --git a/vendor/github.com/apache/thrift/lib/hs/LICENSE b/vendor/github.com/apache/thrift/lib/hs/LICENSE -deleted file mode 100644 -index d645695673349..0000000000000 ---- a/vendor/github.com/apache/thrift/lib/hs/LICENSE -+++ /dev/null -@@ -1,202 +0,0 @@ -- -- Apache License -- Version 2.0, January 2004 -- http://www.apache.org/licenses/ -- -- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -- -- 1. Definitions. -- -- ""License"" shall mean the terms and conditions for use, reproduction, -- and distribution as defined by Sections 1 through 9 of this document. -- -- ""Licensor"" shall mean the copyright owner or entity authorized by -- the copyright owner that is granting the License. -- -- ""Legal Entity"" shall mean the union of the acting entity and all -- other entities that control, are controlled by, or are under common -- control with that entity. For the purposes of this definition, -- ""control"" means (i) the power, direct or indirect, to cause the -- direction or management of such entity, whether by contract or -- otherwise, or (ii) ownership of fifty percent (50%) or more of the -- outstanding shares, or (iii) beneficial ownership of such entity. -- -- ""You"" (or ""Your"") shall mean an individual or Legal Entity -- exercising permissions granted by this License. -- -- ""Source"" form shall mean the preferred form for making modifications, -- including but not limited to software source code, documentation -- source, and configuration files. -- -- ""Object"" form shall mean any form resulting from mechanical -- transformation or translation of a Source form, including but -- not limited to compiled object code, generated documentation, -- and conversions to other media types. -- -- ""Work"" shall mean the work of authorship, whether in Source or -- Object form, made available under the License, as indicated by a -- copyright notice that is included in or attached to the work -- (an example is provided in the Appendix below). -- -- ""Derivative Works"" shall mean any work, whether in Source or Object -- form, that is based on (or derived from) the Work and for which the -- editorial revisions, annotations, elaborations, or other modifications -- represent, as a whole, an original work of authorship. For the purposes -- of this License, Derivative Works shall not include works that remain -- separable from, or merely link (or bind by name) to the interfaces of, -- the Work and Derivative Works thereof. -- -- ""Contribution"" shall mean any work of authorship, including -- the original version of the Work and any modifications or additions -- to that Work or Derivative Works thereof, that is intentionally -- submitted to Licensor for inclusion in the Work by the copyright owner -- or by an individual or Legal Entity authorized to submit on behalf of -- the copyright owner. 
For the purposes of this definition, ""submitted"" -- means any form of electronic, verbal, or written communication sent -- to the Licensor or its representatives, including but not limited to -- communication on electronic mailing lists, source code control systems, -- and issue tracking systems that are managed by, or on behalf of, the -- Licensor for the purpose of discussing and improving the Work, but -- excluding communication that is conspicuously marked or otherwise -- designated in writing by the copyright owner as ""Not a Contribution."" -- -- ""Contributor"" shall mean Licensor and any individual or Legal Entity -- on behalf of whom a Contribution has been received by Licensor and -- subsequently incorporated within the Work. -- -- 2. Grant of Copyright License. Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- copyright license to reproduce, prepare Derivative Works of, -- publicly display, publicly perform, sublicense, and distribute the -- Work and such Derivative Works in Source or Object form. -- -- 3. Grant of Patent License. Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- (except as stated in this section) patent license to make, have made, -- use, offer to sell, sell, import, and otherwise transfer the Work, -- where such license applies only to those patent claims licensable -- by such Contributor that are necessarily infringed by their -- Contribution(s) alone or by combination of their Contribution(s) -- with the Work to which such Contribution(s) was submitted. If You -- institute patent litigation against any entity (including a -- cross-claim or counterclaim in a lawsuit) alleging that the Work -- or a Contribution incorporated within the Work constitutes direct -- or contributory patent infringement, then any patent licenses -- granted to You under this License for that Work shall terminate -- as of the date such litigation is filed. -- -- 4. Redistribution. You may reproduce and distribute copies of the -- Work or Derivative Works thereof in any medium, with or without -- modifications, and in Source or Object form, provided that You -- meet the following conditions: -- -- (a) You must give any other recipients of the Work or -- Derivative Works a copy of this License; and -- -- (b) You must cause any modified files to carry prominent notices -- stating that You changed the files; and -- -- (c) You must retain, in the Source form of any Derivative Works -- that You distribute, all copyright, patent, trademark, and -- attribution notices from the Source form of the Work, -- excluding those notices that do not pertain to any part of -- the Derivative Works; and -- -- (d) If the Work includes a ""NOTICE"" text file as part of its -- distribution, then any Derivative Works that You distribute must -- include a readable copy of the attribution notices contained -- within such NOTICE file, excluding those notices that do not -- pertain to any part of the Derivative Works, in at least one -- of the following places: within a NOTICE text file distributed -- as part of the Derivative Works; within the Source form or -- documentation, if provided along with the Derivative Works; or, -- within a display generated by the Derivative Works, if and -- wherever such third-party notices normally appear. 
The contents -- of the NOTICE file are for informational purposes only and -- do not modify the License. You may add Your own attribution -- notices within Derivative Works that You distribute, alongside -- or as an addendum to the NOTICE text from the Work, provided -- that such additional attribution notices cannot be construed -- as modifying the License. -- -- You may add Your own copyright statement to Your modifications and -- may provide additional or different license terms and conditions -- for use, reproduction, or distribution of Your modifications, or -- for any such Derivative Works as a whole, provided Your use, -- reproduction, and distribution of the Work otherwise complies with -- the conditions stated in this License. -- -- 5. Submission of Contributions. Unless You explicitly state otherwise, -- any Contribution intentionally submitted for inclusion in the Work -- by You to the Licensor shall be under the terms and conditions of -- this License, without any additional terms or conditions. -- Notwithstanding the above, nothing herein shall supersede or modify -- the terms of any separate license agreement you may have executed -- with Licensor regarding such Contributions. -- -- 6. Trademarks. This License does not grant permission to use the trade -- names, trademarks, service marks, or product names of the Licensor, -- except as required for reasonable and customary use in describing the -- origin of the Work and reproducing the content of the NOTICE file. -- -- 7. Disclaimer of Warranty. Unless required by applicable law or -- agreed to in writing, Licensor provides the Work (and each -- Contributor provides its Contributions) on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -- implied, including, without limitation, any warranties or conditions -- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -- PARTICULAR PURPOSE. You are solely responsible for determining the -- appropriateness of using or redistributing the Work and assume any -- risks associated with Your exercise of permissions under this License. -- -- 8. Limitation of Liability. In no event and under no legal theory, -- whether in tort (including negligence), contract, or otherwise, -- unless required by applicable law (such as deliberate and grossly -- negligent acts) or agreed to in writing, shall any Contributor be -- liable to You for damages, including any direct, indirect, special, -- incidental, or consequential damages of any character arising as a -- result of this License or out of the use or inability to use the -- Work (including but not limited to damages for loss of goodwill, -- work stoppage, computer failure or malfunction, or any and all -- other commercial damages or losses), even if such Contributor -- has been advised of the possibility of such damages. -- -- 9. Accepting Warranty or Additional Liability. While redistributing -- the Work or Derivative Works thereof, You may choose to offer, -- and charge a fee for, acceptance of support, warranty, indemnity, -- or other liability obligations and/or rights consistent with this -- License. However, in accepting such obligations, You may act only -- on Your own behalf and on Your sole responsibility, not on behalf -- of any other Contributor, and only if You agree to indemnify, -- defend, and hold each Contributor harmless for any liability -- incurred by, or claims asserted against, such Contributor by reason -- of your accepting any such warranty or additional liability. 
-- -- END OF TERMS AND CONDITIONS -- -- APPENDIX: How to apply the Apache License to your work. -- -- To apply the Apache License to your work, attach the following -- boilerplate notice, with the fields enclosed by brackets ""[]"" -- replaced with your own identifying information. (Don't include -- the brackets!) The text should be enclosed in the appropriate -- comment syntax for the file format. We also recommend that a -- file or class name and description of purpose be included on the -- same ""printed page"" as the copyright notice for easier -- identification within third-party archives. -- -- Copyright [yyyy] [name of copyright owner] -- -- Licensed under the Apache License, Version 2.0 (the ""License""); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -diff --git a/vendor/github.com/apache/thrift/tutorial/erl/client.sh b/vendor/github.com/apache/thrift/tutorial/erl/client.sh -deleted file mode 120000 -index a417e0da98449..0000000000000 ---- a/vendor/github.com/apache/thrift/tutorial/erl/client.sh -+++ /dev/null -@@ -1 +0,0 @@ --server.sh -\ No newline at end of file -diff --git a/vendor/github.com/apache/thrift/tutorial/hs/LICENSE b/vendor/github.com/apache/thrift/tutorial/hs/LICENSE -deleted file mode 100644 -index 3b6d7d74cc9b1..0000000000000 ---- a/vendor/github.com/apache/thrift/tutorial/hs/LICENSE -+++ /dev/null -@@ -1,239 +0,0 @@ -- -- Apache License -- Version 2.0, January 2004 -- http://www.apache.org/licenses/ -- -- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -- -- 1. Definitions. -- -- ""License"" shall mean the terms and conditions for use, reproduction, -- and distribution as defined by Sections 1 through 9 of this document. -- -- ""Licensor"" shall mean the copyright owner or entity authorized by -- the copyright owner that is granting the License. -- -- ""Legal Entity"" shall mean the union of the acting entity and all -- other entities that control, are controlled by, or are under common -- control with that entity. For the purposes of this definition, -- ""control"" means (i) the power, direct or indirect, to cause the -- direction or management of such entity, whether by contract or -- otherwise, or (ii) ownership of fifty percent (50%) or more of the -- outstanding shares, or (iii) beneficial ownership of such entity. -- -- ""You"" (or ""Your"") shall mean an individual or Legal Entity -- exercising permissions granted by this License. -- -- ""Source"" form shall mean the preferred form for making modifications, -- including but not limited to software source code, documentation -- source, and configuration files. -- -- ""Object"" form shall mean any form resulting from mechanical -- transformation or translation of a Source form, including but -- not limited to compiled object code, generated documentation, -- and conversions to other media types. -- -- ""Work"" shall mean the work of authorship, whether in Source or -- Object form, made available under the License, as indicated by a -- copyright notice that is included in or attached to the work -- (an example is provided in the Appendix below). 
-- -- ""Derivative Works"" shall mean any work, whether in Source or Object -- form, that is based on (or derived from) the Work and for which the -- editorial revisions, annotations, elaborations, or other modifications -- represent, as a whole, an original work of authorship. For the purposes -- of this License, Derivative Works shall not include works that remain -- separable from, or merely link (or bind by name) to the interfaces of, -- the Work and Derivative Works thereof. -- -- ""Contribution"" shall mean any work of authorship, including -- the original version of the Work and any modifications or additions -- to that Work or Derivative Works thereof, that is intentionally -- submitted to Licensor for inclusion in the Work by the copyright owner -- or by an individual or Legal Entity authorized to submit on behalf of -- the copyright owner. For the purposes of this definition, ""submitted"" -- means any form of electronic, verbal, or written communication sent -- to the Licensor or its representatives, including but not limited to -- communication on electronic mailing lists, source code control systems, -- and issue tracking systems that are managed by, or on behalf of, the -- Licensor for the purpose of discussing and improving the Work, but -- excluding communication that is conspicuously marked or otherwise -- designated in writing by the copyright owner as ""Not a Contribution."" -- -- ""Contributor"" shall mean Licensor and any individual or Legal Entity -- on behalf of whom a Contribution has been received by Licensor and -- subsequently incorporated within the Work. -- -- 2. Grant of Copyright License. Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- copyright license to reproduce, prepare Derivative Works of, -- publicly display, publicly perform, sublicense, and distribute the -- Work and such Derivative Works in Source or Object form. -- -- 3. Grant of Patent License. Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- (except as stated in this section) patent license to make, have made, -- use, offer to sell, sell, import, and otherwise transfer the Work, -- where such license applies only to those patent claims licensable -- by such Contributor that are necessarily infringed by their -- Contribution(s) alone or by combination of their Contribution(s) -- with the Work to which such Contribution(s) was submitted. If You -- institute patent litigation against any entity (including a -- cross-claim or counterclaim in a lawsuit) alleging that the Work -- or a Contribution incorporated within the Work constitutes direct -- or contributory patent infringement, then any patent licenses -- granted to You under this License for that Work shall terminate -- as of the date such litigation is filed. -- -- 4. Redistribution. 
You may reproduce and distribute copies of the -- Work or Derivative Works thereof in any medium, with or without -- modifications, and in Source or Object form, provided that You -- meet the following conditions: -- -- (a) You must give any other recipients of the Work or -- Derivative Works a copy of this License; and -- -- (b) You must cause any modified files to carry prominent notices -- stating that You changed the files; and -- -- (c) You must retain, in the Source form of any Derivative Works -- that You distribute, all copyright, patent, trademark, and -- attribution notices from the Source form of the Work, -- excluding those notices that do not pertain to any part of -- the Derivative Works; and -- -- (d) If the Work includes a ""NOTICE"" text file as part of its -- distribution, then any Derivative Works that You distribute must -- include a readable copy of the attribution notices contained -- within such NOTICE file, excluding those notices that do not -- pertain to any part of the Derivative Works, in at least one -- of the following places: within a NOTICE text file distributed -- as part of the Derivative Works; within the Source form or -- documentation, if provided along with the Derivative Works; or, -- within a display generated by the Derivative Works, if and -- wherever such third-party notices normally appear. The contents -- of the NOTICE file are for informational purposes only and -- do not modify the License. You may add Your own attribution -- notices within Derivative Works that You distribute, alongside -- or as an addendum to the NOTICE text from the Work, provided -- that such additional attribution notices cannot be construed -- as modifying the License. -- -- You may add Your own copyright statement to Your modifications and -- may provide additional or different license terms and conditions -- for use, reproduction, or distribution of Your modifications, or -- for any such Derivative Works as a whole, provided Your use, -- reproduction, and distribution of the Work otherwise complies with -- the conditions stated in this License. -- -- 5. Submission of Contributions. Unless You explicitly state otherwise, -- any Contribution intentionally submitted for inclusion in the Work -- by You to the Licensor shall be under the terms and conditions of -- this License, without any additional terms or conditions. -- Notwithstanding the above, nothing herein shall supersede or modify -- the terms of any separate license agreement you may have executed -- with Licensor regarding such Contributions. -- -- 6. Trademarks. This License does not grant permission to use the trade -- names, trademarks, service marks, or product names of the Licensor, -- except as required for reasonable and customary use in describing the -- origin of the Work and reproducing the content of the NOTICE file. -- -- 7. Disclaimer of Warranty. Unless required by applicable law or -- agreed to in writing, Licensor provides the Work (and each -- Contributor provides its Contributions) on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -- implied, including, without limitation, any warranties or conditions -- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -- PARTICULAR PURPOSE. You are solely responsible for determining the -- appropriateness of using or redistributing the Work and assume any -- risks associated with Your exercise of permissions under this License. -- -- 8. Limitation of Liability. 
In no event and under no legal theory, -- whether in tort (including negligence), contract, or otherwise, -- unless required by applicable law (such as deliberate and grossly -- negligent acts) or agreed to in writing, shall any Contributor be -- liable to You for damages, including any direct, indirect, special, -- incidental, or consequential damages of any character arising as a -- result of this License or out of the use or inability to use the -- Work (including but not limited to damages for loss of goodwill, -- work stoppage, computer failure or malfunction, or any and all -- other commercial damages or losses), even if such Contributor -- has been advised of the possibility of such damages. -- -- 9. Accepting Warranty or Additional Liability. While redistributing -- the Work or Derivative Works thereof, You may choose to offer, -- and charge a fee for, acceptance of support, warranty, indemnity, -- or other liability obligations and/or rights consistent with this -- License. However, in accepting such obligations, You may act only -- on Your own behalf and on Your sole responsibility, not on behalf -- of any other Contributor, and only if You agree to indemnify, -- defend, and hold each Contributor harmless for any liability -- incurred by, or claims asserted against, such Contributor by reason -- of your accepting any such warranty or additional liability. -- -- END OF TERMS AND CONDITIONS -- -- APPENDIX: How to apply the Apache License to your work. -- -- To apply the Apache License to your work, attach the following -- boilerplate notice, with the fields enclosed by brackets ""[]"" -- replaced with your own identifying information. (Don't include -- the brackets!) The text should be enclosed in the appropriate -- comment syntax for the file format. We also recommend that a -- file or class name and description of purpose be included on the -- same ""printed page"" as the copyright notice for easier -- identification within third-party archives. -- -- Copyright [yyyy] [name of copyright owner] -- -- Licensed under the Apache License, Version 2.0 (the ""License""); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- ---------------------------------------------------- --SOFTWARE DISTRIBUTED WITH THRIFT: -- --The Apache Thrift software includes a number of subcomponents with --separate copyright notices and license terms. Your use of the source --code for the these subcomponents is subject to the terms and --conditions of the following licenses. -- ---------------------------------------------------- --Portions of the following files are licensed under the MIT License: -- -- lib/erl/src/Makefile.am -- --Please see doc/otp-base-license.txt for the full terms of this license. -- ---------------------------------------------------- --For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: -- --# Copyright (c) 2007 Thomas Porschberg --# --# Copying and distribution of this file, with or without --# modification, are permitted in any medium without royalty provided --# the copyright notice and this notice are preserved. 
-- ---------------------------------------------------- --For the lib/nodejs/lib/thrift/json_parse.js: -- --/* -- json_parse.js -- 2015-05-02 -- Public Domain. -- NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -- --*/ --(By Douglas Crockford ) ---------------------------------------------------- -diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE -deleted file mode 100644 -index 698a3f51397ef..0000000000000 ---- a/vendor/github.com/eapache/go-resiliency/LICENSE -+++ /dev/null -@@ -1,22 +0,0 @@ --The MIT License (MIT) -- --Copyright (c) 2014 Evan Huus -- --Permission is hereby granted, free of charge, to any person obtaining a copy --of this software and associated documentation files (the ""Software""), to deal --in the Software without restriction, including without limitation the rights --to use, copy, modify, merge, publish, distribute, sublicense, and/or sell --copies of the Software, and to permit persons to whom the Software is --furnished to do so, subject to the following conditions: -- --The above copyright notice and this permission notice shall be included in all --copies or substantial portions of the Software. -- --THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --SOFTWARE. -- -diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md -deleted file mode 100644 -index 2d1b3d93225d8..0000000000000 ---- a/vendor/github.com/eapache/go-resiliency/breaker/README.md -+++ /dev/null -@@ -1,34 +0,0 @@ --circuit-breaker --=============== -- --[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) --[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) --[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) -- --The circuit-breaker resiliency pattern for golang. -- --Creating a breaker takes three parameters: --- error threshold (for opening the breaker) --- success threshold (for closing the breaker) --- timeout (how long to keep the breaker open) -- --```go --b := breaker.New(3, 1, 5*time.Second) -- --for { -- result := b.Run(func() error { -- // communicate with some external service and -- // return an error if the communication failed -- return nil -- }) -- -- switch result { -- case nil: -- // success! -- case breaker.ErrBreakerOpen: -- // our function wasn't run because the breaker was open -- default: -- // some other error -- } --} --``` -diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go -deleted file mode 100644 -index f88ca7248b0fd..0000000000000 ---- a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go -+++ /dev/null -@@ -1,161 +0,0 @@ --// Package breaker implements the circuit-breaker resiliency pattern for Go. 
--package breaker -- --import ( -- ""errors"" -- ""sync"" -- ""sync/atomic"" -- ""time"" --) -- --// ErrBreakerOpen is the error returned from Run() when the function is not executed --// because the breaker is currently open. --var ErrBreakerOpen = errors.New(""circuit breaker is open"") -- --const ( -- closed uint32 = iota -- open -- halfOpen --) -- --// Breaker implements the circuit-breaker resiliency pattern --type Breaker struct { -- errorThreshold, successThreshold int -- timeout time.Duration -- -- lock sync.Mutex -- state uint32 -- errors, successes int -- lastError time.Time --} -- --// New constructs a new circuit-breaker that starts closed. --// From closed, the breaker opens if ""errorThreshold"" errors are seen --// without an error-free period of at least ""timeout"". From open, the --// breaker half-closes after ""timeout"". From half-open, the breaker closes --// after ""successThreshold"" consecutive successes, or opens on a single error. --func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { -- return &Breaker{ -- errorThreshold: errorThreshold, -- successThreshold: successThreshold, -- timeout: timeout, -- } --} -- --// Run will either return ErrBreakerOpen immediately if the circuit-breaker is --// already open, or it will run the given function and pass along its return --// value. It is safe to call Run concurrently on the same Breaker. --func (b *Breaker) Run(work func() error) error { -- state := atomic.LoadUint32(&b.state) -- -- if state == open { -- return ErrBreakerOpen -- } -- -- return b.doWork(state, work) --} -- --// Go will either return ErrBreakerOpen immediately if the circuit-breaker is --// already open, or it will run the given function in a separate goroutine. --// If the function is run, Go will return nil immediately, and will *not* return --// the return value of the function. It is safe to call Go concurrently on the --// same Breaker. 
--func (b *Breaker) Go(work func() error) error { -- state := atomic.LoadUint32(&b.state) -- -- if state == open { -- return ErrBreakerOpen -- } -- -- // errcheck complains about ignoring the error return value, but -- // that's on purpose; if you want an error from a goroutine you have to -- // get it over a channel or something -- go b.doWork(state, work) -- -- return nil --} -- --func (b *Breaker) doWork(state uint32, work func() error) error { -- var panicValue interface{} -- -- result := func() error { -- defer func() { -- panicValue = recover() -- }() -- return work() -- }() -- -- if result == nil && panicValue == nil && state == closed { -- // short-circuit the normal, success path without contending -- // on the lock -- return nil -- } -- -- // oh well, I guess we have to contend on the lock -- b.processResult(result, panicValue) -- -- if panicValue != nil { -- // as close as Go lets us come to a ""rethrow"" although unfortunately -- // we lose the original panicing location -- panic(panicValue) -- } -- -- return result --} -- --func (b *Breaker) processResult(result error, panicValue interface{}) { -- b.lock.Lock() -- defer b.lock.Unlock() -- -- if result == nil && panicValue == nil { -- if b.state == halfOpen { -- b.successes++ -- if b.successes == b.successThreshold { -- b.closeBreaker() -- } -- } -- } else { -- if b.errors > 0 { -- expiry := b.lastError.Add(b.timeout) -- if time.Now().After(expiry) { -- b.errors = 0 -- } -- } -- -- switch b.state { -- case closed: -- b.errors++ -- if b.errors == b.errorThreshold { -- b.openBreaker() -- } else { -- b.lastError = time.Now() -- } -- case halfOpen: -- b.openBreaker() -- } -- } --} -- --func (b *Breaker) openBreaker() { -- b.changeState(open) -- go b.timer() --} -- --func (b *Breaker) closeBreaker() { -- b.changeState(closed) --} -- --func (b *Breaker) timer() { -- time.Sleep(b.timeout) -- -- b.lock.Lock() -- defer b.lock.Unlock() -- -- b.changeState(halfOpen) --} -- --func (b *Breaker) changeState(newState uint32) { -- b.errors = 0 -- b.successes = 0 -- atomic.StoreUint32(&b.state, newState) --} -diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore -deleted file mode 100644 -index daf913b1b347a..0000000000000 ---- a/vendor/github.com/eapache/go-xerial-snappy/.gitignore -+++ /dev/null -@@ -1,24 +0,0 @@ --# Compiled Object files, Static and Dynamic libs (Shared Objects) --*.o --*.a --*.so -- --# Folders --_obj --_test -- --# Architecture specific extensions/prefixes --*.[568vq] --[568vq].out -- --*.cgo1.go --*.cgo2.c --_cgo_defun.c --_cgo_gotypes.go --_cgo_export.* -- --_testmain.go -- --*.exe --*.test --*.prof -diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml -deleted file mode 100644 -index d6cf4f1fa1b55..0000000000000 ---- a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml -+++ /dev/null -@@ -1,7 +0,0 @@ --language: go -- --go: --- 1.5.4 --- 1.6.1 -- --sudo: false -diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE -deleted file mode 100644 -index 5bf3688d9e412..0000000000000 ---- a/vendor/github.com/eapache/go-xerial-snappy/LICENSE -+++ /dev/null -@@ -1,21 +0,0 @@ --The MIT License (MIT) -- --Copyright (c) 2016 Evan Huus -- --Permission is hereby granted, free of charge, to any person obtaining a copy --of this software and associated documentation files (the ""Software""), to deal --in the Software without 
restriction, including without limitation the rights --to use, copy, modify, merge, publish, distribute, sublicense, and/or sell --copies of the Software, and to permit persons to whom the Software is --furnished to do so, subject to the following conditions: -- --The above copyright notice and this permission notice shall be included in all --copies or substantial portions of the Software. -- --THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --SOFTWARE. -diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md -deleted file mode 100644 -index 3f2695c72826f..0000000000000 ---- a/vendor/github.com/eapache/go-xerial-snappy/README.md -+++ /dev/null -@@ -1,13 +0,0 @@ --# go-xerial-snappy -- --[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy) -- --Xerial-compatible Snappy framing support for golang. -- --Packages using Xerial for snappy encoding use a framing format incompatible with --basically everything else in existence. This package wraps Go's built-in snappy --package to support it. -- --Apps that use this format include Apache Kafka (see --https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for --details). -diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go -deleted file mode 100644 -index b8f8b51fcef3f..0000000000000 ---- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go -+++ /dev/null -@@ -1,43 +0,0 @@ --package snappy -- --import ( -- ""bytes"" -- ""encoding/binary"" -- -- master ""github.com/golang/snappy"" --) -- --var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} -- --// Encode encodes data as snappy with no framing header. --func Encode(src []byte) []byte { -- return master.Encode(nil, src) --} -- --// Decode decodes snappy data whether it is traditional unframed --// or includes the xerial framing format. --func Decode(src []byte) ([]byte, error) { -- if !bytes.Equal(src[:8], xerialHeader) { -- return master.Decode(nil, src) -- } -- -- var ( -- pos = uint32(16) -- max = uint32(len(src)) -- dst = make([]byte, 0, len(src)) -- chunk []byte -- err error -- ) -- for pos < max { -- size := binary.BigEndian.Uint32(src[pos : pos+4]) -- pos += 4 -- -- chunk, err = master.Decode(chunk, src[pos:pos+size]) -- if err != nil { -- return nil, err -- } -- pos += size -- dst = append(dst, chunk...) 
-- } -- return dst, nil --} -diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore -deleted file mode 100644 -index 836562412fe8a..0000000000000 ---- a/vendor/github.com/eapache/queue/.gitignore -+++ /dev/null -@@ -1,23 +0,0 @@ --# Compiled Object files, Static and Dynamic libs (Shared Objects) --*.o --*.a --*.so -- --# Folders --_obj --_test -- --# Architecture specific extensions/prefixes --*.[568vq] --[568vq].out -- --*.cgo1.go --*.cgo2.c --_cgo_defun.c --_cgo_gotypes.go --_cgo_export.* -- --_testmain.go -- --*.exe --*.test -diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml -deleted file mode 100644 -index 235a40a493fff..0000000000000 ---- a/vendor/github.com/eapache/queue/.travis.yml -+++ /dev/null -@@ -1,7 +0,0 @@ --language: go --sudo: false -- --go: -- - 1.2 -- - 1.3 -- - 1.4 -diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE -deleted file mode 100644 -index d5f36dbcaaf61..0000000000000 ---- a/vendor/github.com/eapache/queue/LICENSE -+++ /dev/null -@@ -1,21 +0,0 @@ --The MIT License (MIT) -- --Copyright (c) 2014 Evan Huus -- --Permission is hereby granted, free of charge, to any person obtaining a copy --of this software and associated documentation files (the ""Software""), to deal --in the Software without restriction, including without limitation the rights --to use, copy, modify, merge, publish, distribute, sublicense, and/or sell --copies of the Software, and to permit persons to whom the Software is --furnished to do so, subject to the following conditions: -- --The above copyright notice and this permission notice shall be included in all --copies or substantial portions of the Software. -- --THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --SOFTWARE. -\ No newline at end of file -diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md -deleted file mode 100644 -index 8e782335cd773..0000000000000 ---- a/vendor/github.com/eapache/queue/README.md -+++ /dev/null -@@ -1,16 +0,0 @@ --Queue --===== -- --[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) --[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) --[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) -- --A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. --Using this instead of other, simpler, queue implementations (slice+append or linked list) provides --substantial memory and time benefits, and fewer GC pauses. -- --The queue implemented here is as fast as it is in part because it is *not* thread-safe. -- --Follows semantic versioning using https://gopkg.in/ - import from --[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) --for guaranteed API stability. 
-diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go -deleted file mode 100644 -index 71d1acdf27b2f..0000000000000 ---- a/vendor/github.com/eapache/queue/queue.go -+++ /dev/null -@@ -1,102 +0,0 @@ --/* --Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. --Using this instead of other, simpler, queue implementations (slice+append or linked list) provides --substantial memory and time benefits, and fewer GC pauses. -- --The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. --*/ --package queue -- --// minQueueLen is smallest capacity that queue may have. --// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). --const minQueueLen = 16 -- --// Queue represents a single instance of the queue data structure. --type Queue struct { -- buf []interface{} -- head, tail, count int --} -- --// New constructs and returns a new Queue. --func New() *Queue { -- return &Queue{ -- buf: make([]interface{}, minQueueLen), -- } --} -- --// Length returns the number of elements currently stored in the queue. --func (q *Queue) Length() int { -- return q.count --} -- --// resizes the queue to fit exactly twice its current contents --// this can result in shrinking if the queue is less than half-full --func (q *Queue) resize() { -- newBuf := make([]interface{}, q.count<<1) -- -- if q.tail > q.head { -- copy(newBuf, q.buf[q.head:q.tail]) -- } else { -- n := copy(newBuf, q.buf[q.head:]) -- copy(newBuf[n:], q.buf[:q.tail]) -- } -- -- q.head = 0 -- q.tail = q.count -- q.buf = newBuf --} -- --// Add puts an element on the end of the queue. --func (q *Queue) Add(elem interface{}) { -- if q.count == len(q.buf) { -- q.resize() -- } -- -- q.buf[q.tail] = elem -- // bitwise modulus -- q.tail = (q.tail + 1) & (len(q.buf) - 1) -- q.count++ --} -- --// Peek returns the element at the head of the queue. This call panics --// if the queue is empty. --func (q *Queue) Peek() interface{} { -- if q.count <= 0 { -- panic(""queue: Peek() called on empty queue"") -- } -- return q.buf[q.head] --} -- --// Get returns the element at index i in the queue. If the index is --// invalid, the call will panic. This method accepts both positive and --// negative index values. Index 0 refers to the first element, and --// index -1 refers to the last. --func (q *Queue) Get(i int) interface{} { -- // If indexing backwards, convert to positive index. -- if i < 0 { -- i += q.count -- } -- if i < 0 || i >= q.count { -- panic(""queue: Get() called with index out of range"") -- } -- // bitwise modulus -- return q.buf[(q.head+i)&(len(q.buf)-1)] --} -- --// Remove removes and returns the element from the front of the queue. If the --// queue is empty, the call will panic. --func (q *Queue) Remove() interface{} { -- if q.count <= 0 { -- panic(""queue: Remove() called on empty queue"") -- } -- ret := q.buf[q.head] -- q.buf[q.head] = nil -- // bitwise modulus -- q.head = (q.head + 1) & (len(q.buf) - 1) -- q.count-- -- // Resize down if buffer 1/4 full. 
-- if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { -- q.resize() -- } -- return ret --} -diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md -index c8ef21aadc4b2..78c49dbbeaad4 100644 ---- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md -+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md -@@ -28,7 +28,9 @@ conn, err := grpc.Dial( - address, - ... // other options - grpc.WithUnaryInterceptor( -- otgrpc.OpenTracingClientInterceptor(tracer))) -+ otgrpc.OpenTracingClientInterceptor(tracer)), -+ grpc.WithStreamInterceptor( -+ otgrpc.OpenTracingStreamClientInterceptor(tracer))) - - // All future RPC activity involving `conn` will be automatically traced. - ``` -@@ -46,7 +48,9 @@ var tracer opentracing.Tracer = ... - s := grpc.NewServer( - ... // other options - grpc.UnaryInterceptor( -- otgrpc.OpenTracingServerInterceptor(tracer))) -+ otgrpc.OpenTracingServerInterceptor(tracer)), -+ grpc.StreamInterceptor( -+ otgrpc.OpenTracingStreamServerInterceptor(tracer))) - - // All future RPC activity involving `s` will be automatically traced. - ``` -diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go -index 3975c3cf51048..3414e55cb1f70 100644 ---- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go -+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go -@@ -7,6 +7,9 @@ import ( - ""golang.org/x/net/context"" - ""google.golang.org/grpc"" - ""google.golang.org/grpc/metadata"" -+ ""io"" -+ ""runtime"" -+ ""sync/atomic"" - ) - - // OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable -@@ -50,19 +53,7 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) - gRPCComponentTag, - ) - defer clientSpan.Finish() -- md, ok := metadata.FromOutgoingContext(ctx) -- if !ok { -- md = metadata.New(nil) -- } else { -- md = md.Copy() -- } -- mdWriter := metadataReaderWriter{md} -- err = tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) -- // We have no better place to record an error than the Span itself :-/ -- if err != nil { -- clientSpan.LogFields(log.String(""event"", ""Tracer.Inject() failed""), log.Error(err)) -- } -- ctx = metadata.NewOutgoingContext(ctx, md) -+ ctx = injectSpanContext(ctx, tracer, clientSpan) - if otgrpcOpts.logPayloads { - clientSpan.LogFields(log.Object(""gRPC request"", req)) - } -@@ -81,3 +72,168 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) - return err - } - } -+ -+// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -+// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating -+// a single span to correspond to the lifetime of the RPC's stream. -+// -+// For example: -+// -+// conn, err := grpc.Dial( -+// address, -+// ..., // (existing DialOptions) -+// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) -+// -+// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC -+// metadata; they will also look in the context.Context for an active -+// in-process parent Span and establish a ChildOf reference if such a parent -+// Span could be found. 
-+func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { -+ otgrpcOpts := newOptions() -+ otgrpcOpts.apply(optFuncs...) -+ return func( -+ ctx context.Context, -+ desc *grpc.StreamDesc, -+ cc *grpc.ClientConn, -+ method string, -+ streamer grpc.Streamer, -+ opts ...grpc.CallOption, -+ ) (grpc.ClientStream, error) { -+ var err error -+ var parentCtx opentracing.SpanContext -+ if parent := opentracing.SpanFromContext(ctx); parent != nil { -+ parentCtx = parent.Context() -+ } -+ if otgrpcOpts.inclusionFunc != nil && -+ !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { -+ return streamer(ctx, desc, cc, method, opts...) -+ } -+ -+ clientSpan := tracer.StartSpan( -+ method, -+ opentracing.ChildOf(parentCtx), -+ ext.SpanKindRPCClient, -+ gRPCComponentTag, -+ ) -+ ctx = injectSpanContext(ctx, tracer, clientSpan) -+ cs, err := streamer(ctx, desc, cc, method, opts...) -+ if err != nil { -+ clientSpan.LogFields(log.String(""event"", ""error""), log.String(""message"", err.Error())) -+ SetSpanTags(clientSpan, err, true) -+ clientSpan.Finish() -+ return cs, err -+ } -+ return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil -+ } -+} -+ -+func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { -+ finishChan := make(chan struct{}) -+ -+ isFinished := new(int32) -+ *isFinished = 0 -+ finishFunc := func(err error) { -+ // The current OpenTracing specification forbids finishing a span more than -+ // once. Since we have multiple code paths that could concurrently call -+ // `finishFunc`, we need to add some sort of synchronization to guard against -+ // multiple finishing. -+ if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { -+ return -+ } -+ close(finishChan) -+ defer clientSpan.Finish() -+ if err != nil { -+ clientSpan.LogFields(log.String(""event"", ""error""), log.String(""message"", err.Error())) -+ SetSpanTags(clientSpan, err, true) -+ } -+ if otgrpcOpts.decorator != nil { -+ otgrpcOpts.decorator(clientSpan, method, nil, nil, err) -+ } -+ } -+ go func() { -+ select { -+ case <-finishChan: -+ // The client span is being finished by another code path; hence, no -+ // action is necessary. -+ case <-cs.Context().Done(): -+ finishFunc(cs.Context().Err()) -+ } -+ }() -+ otcs := &openTracingClientStream{ -+ ClientStream: cs, -+ desc: desc, -+ finishFunc: finishFunc, -+ } -+ -+ // The `ClientStream` interface allows one to omit calling `Recv` if it's -+ // known that the result will be `io.EOF`. See -+ // http://stackoverflow.com/q/42915337 -+ // In such cases, there's nothing that triggers the span to finish. We, -+ // therefore, set a finalizer so that the span and the context goroutine will -+ // at least be cleaned up when the garbage collector is run. 
-+ runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) { -+ otcs.finishFunc(nil) -+ }) -+ return otcs -+} -+ -+type openTracingClientStream struct { -+ grpc.ClientStream -+ desc *grpc.StreamDesc -+ finishFunc func(error) -+} -+ -+func (cs *openTracingClientStream) Header() (metadata.MD, error) { -+ md, err := cs.ClientStream.Header() -+ if err != nil { -+ cs.finishFunc(err) -+ } -+ return md, err -+} -+ -+func (cs *openTracingClientStream) SendMsg(m interface{}) error { -+ err := cs.ClientStream.SendMsg(m) -+ if err != nil { -+ cs.finishFunc(err) -+ } -+ return err -+} -+ -+func (cs *openTracingClientStream) RecvMsg(m interface{}) error { -+ err := cs.ClientStream.RecvMsg(m) -+ if err == io.EOF { -+ cs.finishFunc(nil) -+ return err -+ } else if err != nil { -+ cs.finishFunc(err) -+ return err -+ } -+ if !cs.desc.ServerStreams { -+ cs.finishFunc(nil) -+ } -+ return err -+} -+ -+func (cs *openTracingClientStream) CloseSend() error { -+ err := cs.ClientStream.CloseSend() -+ if err != nil { -+ cs.finishFunc(err) -+ } -+ return err -+} -+ -+func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context { -+ md, ok := metadata.FromOutgoingContext(ctx) -+ if !ok { -+ md = metadata.New(nil) -+ } else { -+ md = md.Copy() -+ } -+ mdWriter := metadataReaderWriter{md} -+ err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) -+ // We have no better place to record an error than the Span itself :-/ -+ if err != nil { -+ clientSpan.LogFields(log.String(""event"", ""Tracer.Inject() failed""), log.Error(err)) -+ } -+ return metadata.NewOutgoingContext(ctx, md) -+} -diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go -index 2e0d5ab9d9f36..62cf54d221767 100644 ---- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go -+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go -@@ -33,11 +33,7 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler, - ) (resp interface{}, err error) { -- md, ok := metadata.FromIncomingContext(ctx) -- if !ok { -- md = metadata.New(nil) -- } -- spanContext, err := tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) -+ spanContext, err := extractSpanContext(ctx, tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer -@@ -73,3 +69,73 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) - return resp, err - } - } -+ -+// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable -+// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by -+// creating a single span to correspond to the lifetime of the RPC's stream. -+// -+// For example: -+// -+// s := grpc.NewServer( -+// ..., // (existing ServerOptions) -+// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) -+// -+// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC -+// metadata; if found, the server span will act as the ChildOf that RPC -+// SpanContext. -+// -+// Root or not, the server Span will be embedded in the context.Context for the -+// application-specific gRPC handler(s) to access. 
-+func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { -+ otgrpcOpts := newOptions() -+ otgrpcOpts.apply(optFuncs...) -+ return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { -+ spanContext, err := extractSpanContext(ss.Context(), tracer) -+ if err != nil && err != opentracing.ErrSpanContextNotFound { -+ // TODO: establish some sort of error reporting mechanism here. We -+ // don't know where to put such an error and must rely on Tracer -+ // implementations to do something appropriate for the time being. -+ } -+ if otgrpcOpts.inclusionFunc != nil && -+ !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { -+ return handler(srv, ss) -+ } -+ -+ serverSpan := tracer.StartSpan( -+ info.FullMethod, -+ ext.RPCServerOption(spanContext), -+ gRPCComponentTag, -+ ) -+ defer serverSpan.Finish() -+ ss = &openTracingServerStream{ -+ ServerStream: ss, -+ ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), -+ } -+ err = handler(srv, ss) -+ if err != nil { -+ SetSpanTags(serverSpan, err, false) -+ serverSpan.LogFields(log.String(""event"", ""error""), log.String(""message"", err.Error())) -+ } -+ if otgrpcOpts.decorator != nil { -+ otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) -+ } -+ return err -+ } -+} -+ -+type openTracingServerStream struct { -+ grpc.ServerStream -+ ctx context.Context -+} -+ -+func (ss *openTracingServerStream) Context() context.Context { -+ return ss.ctx -+} -+ -+func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { -+ md, ok := metadata.FromIncomingContext(ctx) -+ if !ok { -+ md = metadata.New(nil) -+ } -+ return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) -+} -diff --git a/vendor/github.com/opentracing-contrib/go-observer/.gitignore b/vendor/github.com/opentracing-contrib/go-observer/.gitignore -deleted file mode 100644 -index a1338d68517ee..0000000000000 ---- a/vendor/github.com/opentracing-contrib/go-observer/.gitignore -+++ /dev/null -@@ -1,14 +0,0 @@ --# Binaries for programs and plugins --*.exe --*.dll --*.so --*.dylib -- --# Test binary, build with `go test -c` --*.test -- --# Output of the go coverage tool, specifically when used with LiteIDE --*.out -- --# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 --.glide/ -diff --git a/vendor/github.com/opentracing-contrib/go-observer/LICENSE b/vendor/github.com/opentracing-contrib/go-observer/LICENSE -deleted file mode 100644 -index 044f3dfd4748d..0000000000000 ---- a/vendor/github.com/opentracing-contrib/go-observer/LICENSE -+++ /dev/null -@@ -1,201 +0,0 @@ -- Apache License -- Version 2.0, January 2004 -- http://www.apache.org/licenses/ -- -- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -- -- 1. Definitions. -- -- ""License"" shall mean the terms and conditions for use, reproduction, -- and distribution as defined by Sections 1 through 9 of this document. -- -- ""Licensor"" shall mean the copyright owner or entity authorized by -- the copyright owner that is granting the License. -- -- ""Legal Entity"" shall mean the union of the acting entity and all -- other entities that control, are controlled by, or are under common -- control with that entity. 
For the purposes of this definition, -- ""control"" means (i) the power, direct or indirect, to cause the -- direction or management of such entity, whether by contract or -- otherwise, or (ii) ownership of fifty percent (50%) or more of the -- outstanding shares, or (iii) beneficial ownership of such entity. -- -- ""You"" (or ""Your"") shall mean an individual or Legal Entity -- exercising permissions granted by this License. -- -- ""Source"" form shall mean the preferred form for making modifications, -- including but not limited to software source code, documentation -- source, and configuration files. -- -- ""Object"" form shall mean any form resulting from mechanical -- transformation or translation of a Source form, including but -- not limited to compiled object code, generated documentation, -- and conversions to other media types. -- -- ""Work"" shall mean the work of authorship, whether in Source or -- Object form, made available under the License, as indicated by a -- copyright notice that is included in or attached to the work -- (an example is provided in the Appendix below). -- -- ""Derivative Works"" shall mean any work, whether in Source or Object -- form, that is based on (or derived from) the Work and for which the -- editorial revisions, annotations, elaborations, or other modifications -- represent, as a whole, an original work of authorship. For the purposes -- of this License, Derivative Works shall not include works that remain -- separable from, or merely link (or bind by name) to the interfaces of, -- the Work and Derivative Works thereof. -- -- ""Contribution"" shall mean any work of authorship, including -- the original version of the Work and any modifications or additions -- to that Work or Derivative Works thereof, that is intentionally -- submitted to Licensor for inclusion in the Work by the copyright owner -- or by an individual or Legal Entity authorized to submit on behalf of -- the copyright owner. For the purposes of this definition, ""submitted"" -- means any form of electronic, verbal, or written communication sent -- to the Licensor or its representatives, including but not limited to -- communication on electronic mailing lists, source code control systems, -- and issue tracking systems that are managed by, or on behalf of, the -- Licensor for the purpose of discussing and improving the Work, but -- excluding communication that is conspicuously marked or otherwise -- designated in writing by the copyright owner as ""Not a Contribution."" -- -- ""Contributor"" shall mean Licensor and any individual or Legal Entity -- on behalf of whom a Contribution has been received by Licensor and -- subsequently incorporated within the Work. -- -- 2. Grant of Copyright License. Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- copyright license to reproduce, prepare Derivative Works of, -- publicly display, publicly perform, sublicense, and distribute the -- Work and such Derivative Works in Source or Object form. -- -- 3. Grant of Patent License. 
Subject to the terms and conditions of -- this License, each Contributor hereby grants to You a perpetual, -- worldwide, non-exclusive, no-charge, royalty-free, irrevocable -- (except as stated in this section) patent license to make, have made, -- use, offer to sell, sell, import, and otherwise transfer the Work, -- where such license applies only to those patent claims licensable -- by such Contributor that are necessarily infringed by their -- Contribution(s) alone or by combination of their Contribution(s) -- with the Work to which such Contribution(s) was submitted. If You -- institute patent litigation against any entity (including a -- cross-claim or counterclaim in a lawsuit) alleging that the Work -- or a Contribution incorporated within the Work constitutes direct -- or contributory patent infringement, then any patent licenses -- granted to You under this License for that Work shall terminate -- as of the date such litigation is filed. -- -- 4. Redistribution. You may reproduce and distribute copies of the -- Work or Derivative Works thereof in any medium, with or without -- modifications, and in Source or Object form, provided that You -- meet the following conditions: -- -- (a) You must give any other recipients of the Work or -- Derivative Works a copy of this License; and -- -- (b) You must cause any modified files to carry prominent notices -- stating that You changed the files; and -- -- (c) You must retain, in the Source form of any Derivative Works -- that You distribute, all copyright, patent, trademark, and -- attribution notices from the Source form of the Work, -- excluding those notices that do not pertain to any part of -- the Derivative Works; and -- -- (d) If the Work includes a ""NOTICE"" text file as part of its -- distribution, then any Derivative Works that You distribute must -- include a readable copy of the attribution notices contained -- within such NOTICE file, excluding those notices that do not -- pertain to any part of the Derivative Works, in at least one -- of the following places: within a NOTICE text file distributed -- as part of the Derivative Works; within the Source form or -- documentation, if provided along with the Derivative Works; or, -- within a display generated by the Derivative Works, if and -- wherever such third-party notices normally appear. The contents -- of the NOTICE file are for informational purposes only and -- do not modify the License. You may add Your own attribution -- notices within Derivative Works that You distribute, alongside -- or as an addendum to the NOTICE text from the Work, provided -- that such additional attribution notices cannot be construed -- as modifying the License. -- -- You may add Your own copyright statement to Your modifications and -- may provide additional or different license terms and conditions -- for use, reproduction, or distribution of Your modifications, or -- for any such Derivative Works as a whole, provided Your use, -- reproduction, and distribution of the Work otherwise complies with -- the conditions stated in this License. -- -- 5. Submission of Contributions. Unless You explicitly state otherwise, -- any Contribution intentionally submitted for inclusion in the Work -- by You to the Licensor shall be under the terms and conditions of -- this License, without any additional terms or conditions. -- Notwithstanding the above, nothing herein shall supersede or modify -- the terms of any separate license agreement you may have executed -- with Licensor regarding such Contributions. 
-- -- 6. Trademarks. This License does not grant permission to use the trade -- names, trademarks, service marks, or product names of the Licensor, -- except as required for reasonable and customary use in describing the -- origin of the Work and reproducing the content of the NOTICE file. -- -- 7. Disclaimer of Warranty. Unless required by applicable law or -- agreed to in writing, Licensor provides the Work (and each -- Contributor provides its Contributions) on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -- implied, including, without limitation, any warranties or conditions -- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -- PARTICULAR PURPOSE. You are solely responsible for determining the -- appropriateness of using or redistributing the Work and assume any -- risks associated with Your exercise of permissions under this License. -- -- 8. Limitation of Liability. In no event and under no legal theory, -- whether in tort (including negligence), contract, or otherwise, -- unless required by applicable law (such as deliberate and grossly -- negligent acts) or agreed to in writing, shall any Contributor be -- liable to You for damages, including any direct, indirect, special, -- incidental, or consequential damages of any character arising as a -- result of this License or out of the use or inability to use the -- Work (including but not limited to damages for loss of goodwill, -- work stoppage, computer failure or malfunction, or any and all -- other commercial damages or losses), even if such Contributor -- has been advised of the possibility of such damages. -- -- 9. Accepting Warranty or Additional Liability. While redistributing -- the Work or Derivative Works thereof, You may choose to offer, -- and charge a fee for, acceptance of support, warranty, indemnity, -- or other liability obligations and/or rights consistent with this -- License. However, in accepting such obligations, You may act only -- on Your own behalf and on Your sole responsibility, not on behalf -- of any other Contributor, and only if You agree to indemnify, -- defend, and hold each Contributor harmless for any liability -- incurred by, or claims asserted against, such Contributor by reason -- of your accepting any such warranty or additional liability. -- -- END OF TERMS AND CONDITIONS -- -- APPENDIX: How to apply the Apache License to your work. -- -- To apply the Apache License to your work, attach the following -- boilerplate notice, with the fields enclosed by brackets ""{}"" -- replaced with your own identifying information. (Don't include -- the brackets!) The text should be enclosed in the appropriate -- comment syntax for the file format. We also recommend that a -- file or class name and description of purpose be included on the -- same ""printed page"" as the copyright notice for easier -- identification within third-party archives. -- -- Copyright (c) 2017 opentracing-contrib -- -- Licensed under the Apache License, Version 2.0 (the ""License""); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an ""AS IS"" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. 
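For context, the otgrpc changes above add a stream server interceptor next to the existing unary one. The following is a minimal illustrative sketch (not part of the vendored diff) of wiring both interceptors into a gRPC server; the listener address and tracer setup are assumptions, and only the otgrpc constructors shown in the diff above are relied on.

```go
package main

import (
	"net"

	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
)

func main() {
	// GlobalTracer is a no-op by default; a real deployment would register a
	// concrete tracer (e.g. Jaeger or Zipkin) via opentracing.SetGlobalTracer.
	tracer := opentracing.GlobalTracer()

	// Instrument unary and streaming RPCs with the otgrpc interceptors
	// defined in the vendored package shown above.
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)),
		grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)),
	)

	lis, err := net.Listen("tcp", ":9095")
	if err != nil {
		panic(err)
	}
	// Service registration omitted; register gRPC services on srv here.
	_ = srv.Serve(lis)
}
```

The interceptor constructors and the otgrpc import path match the vendored files in this diff; everything else in the sketch is illustrative.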
-diff --git a/vendor/github.com/opentracing-contrib/go-observer/README.md b/vendor/github.com/opentracing-contrib/go-observer/README.md -deleted file mode 100644 -index 334a9aa6b6627..0000000000000 ---- a/vendor/github.com/opentracing-contrib/go-observer/README.md -+++ /dev/null -@@ -1,64 +0,0 @@ --# An Observer API for OpenTracing-go Tracers -- --OTObserver can be used to watch the span events like StartSpan(), --SetOperationName(), SetTag() and Finish(). A need for observers --arose when information (metrics) more than just the latency information was --required for the spans, in the distributed tracers. But, there can be a lot --of metrics in different domains and adding such metrics to any one (client) --tracer breaks cross-platform compatibility. There are various ways to --avoid such issues, however, an observer pattern is cleaner and provides loose --coupling between the packages exporting metrics (on span events) and the --tracer. -- --This information can be in the form of hardware metrics, RPC metrics, --useful metrics exported out of the kernel or other metrics, profiled for a --span. These additional metrics can help us in getting better Root-cause --analysis. With that being said, its not just for calculation of metrics, --it can be used for anything which needs watching the span events. -- --## Installation and Usage -- --The `otobserver` package provides an API to watch span's events and define --callbacks for these events. This API would be a functional option to a --tracer constructor that passes an Observer. 3rd party packages (who want to --watch the span events) should actually implement this observer API. --To do that, first fetch the package using go get : -- --``` -- go get -v github.com/opentracing-contrib/go-observer --``` -- --and say : -- --```go -- import ""github.com/opentracing-contrib/go-observer"" --``` -- --and then, define the required span event callbacks. These registered --callbacks would then be called on span events if an observer is created. --Tracer may allow registering multiple observers. Have a look at the [jaeger's observer](https://github.com/uber/jaeger-client-go/blob/master/observer.go). -- --With the required setup implemented in the backend tracers, packages --watching the span events need to implement the observer api defining what --they need to do for the observed span events. -- --## Span events -- --An observer registered with this api, can observe for the following four --span events : -- --```go -- StartSpan() -- SetOperationName() -- SetTag() -- Finish() --``` -- --### Tradeoffs -- --As noble as our thoughts might be in fetching additional metrics (other than --latency) for a span using an observer, there are some overhead costs. Not all --observers need to observe all the span events, in which case, we may have --to keep some callback functions empty. In effect, we will still call these --functions, and that will incur unnecessary overhead. To know more about this --and other tradeoffs, see this [discussion](https://github.com/opentracing/opentracing-go/pull/135#discussion_r105497329). -diff --git a/vendor/github.com/opentracing-contrib/go-observer/observer.go b/vendor/github.com/opentracing-contrib/go-observer/observer.go -deleted file mode 100644 -index c8cbf61bd555b..0000000000000 ---- a/vendor/github.com/opentracing-contrib/go-observer/observer.go -+++ /dev/null -@@ -1,39 +0,0 @@ --// This project is licensed under the Apache License 2.0, see LICENSE. 
-- --package otobserver -- --import opentracing ""github.com/opentracing/opentracing-go"" -- --// Observer can be registered with a Tracer to recieve notifications --// about new Spans. Tracers are not required to support the Observer API. --// The actual registration depends on the implementation, which might look --// like the below e.g : --// observer := myobserver.NewObserver() --// tracer := client.NewTracer(..., client.WithObserver(observer)) --// --type Observer interface { -- // Create and return a span observer. Called when a span starts. -- // If the Observer is not interested in the given span, it must return (nil, false). -- // E.g : -- // func StartSpan(opName string, opts ...opentracing.StartSpanOption) { -- // var sp opentracing.Span -- // sso := opentracing.StartSpanOptions{} -- // spanObserver, ok := observer.OnStartSpan(span, opName, sso); -- // if ok { -- // // we have a valid SpanObserver -- // } -- // ... -- // } -- OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (SpanObserver, bool) --} -- --// SpanObserver is created by the Observer and receives notifications about --// other Span events. --type SpanObserver interface { -- // Callback called from opentracing.Span.SetOperationName() -- OnSetOperationName(operationName string) -- // Callback called from opentracing.Span.SetTag() -- OnSetTag(key string, value interface{}) -- // Callback called from opentracing.Span.Finish() -- OnFinish(options opentracing.FinishOptions) --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore b/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore -deleted file mode 100644 -index 37721f69f4e34..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore -+++ /dev/null -@@ -1,4 +0,0 @@ --build/* --.idea/ --.project --examples/**/build -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml b/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml -deleted file mode 100644 -index 8a05cdc0b66cc..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml -+++ /dev/null -@@ -1,12 +0,0 @@ --language: go -- --go: -- - 1.8 -- - 1.9 -- - tip -- --install: -- - go get -d -t ./... -- - go get -u github.com/golang/lint/... --script: -- - make test vet lint bench -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE b/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE -deleted file mode 100644 -index 66fff971dea0a..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE -+++ /dev/null -@@ -1,22 +0,0 @@ --The MIT License (MIT) -- --Copyright (c) 2016 The OpenTracing Authors --Copyright (c) 2016 Bas van Beek -- --Permission is hereby granted, free of charge, to any person obtaining a copy --of this software and associated documentation files (the ""Software""), to deal --in the Software without restriction, including without limitation the rights --to use, copy, modify, merge, publish, distribute, sublicense, and/or sell --copies of the Software, and to permit persons to whom the Software is --furnished to do so, subject to the following conditions: -- --The above copyright notice and this permission notice shall be included in all --copies or substantial portions of the Software. -- --THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE --AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE --SOFTWARE. -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile b/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile -deleted file mode 100644 -index d0951ed452a2a..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile -+++ /dev/null -@@ -1,26 +0,0 @@ -- --.DEFAULT_GOAL := test -- --.PHONY: test --test: -- go test -v -race -cover ./... -- --.PHONY: bench --bench: -- go test -v -run - -bench . -benchmem ./... -- --.PHONY: lint --lint: -- # Ignore grep's exit code since no match returns 1. -- -golint ./... | grep --invert-match -E '^.*\.pb\.go|^thrift' -- @ -- @! (golint ./... | grep --invert-match -E '^.*\.pb\.go|^thrift' | read dummy) -- --.PHONY: vet --vet: -- go vet ./... -- --.PHONY: all --all: vet lint test bench -- --.PHONY: example -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md b/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md -deleted file mode 100644 -index a3010843d579a..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md -+++ /dev/null -@@ -1,29 +0,0 @@ --# zipkin-go-opentracing -- --[![Travis CI](https://travis-ci.org/openzipkin/zipkin-go-opentracing.svg?branch=master)](https://travis-ci.org/openzipkin/zipkin-go-opentracing) --[![CircleCI](https://circleci.com/gh/openzipkin/zipkin-go-opentracing.svg?style=shield)](https://circleci.com/gh/openzipkin/zipkin-go-opentracing) --[![GoDoc](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing?status.svg)](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing) --[![Go Report Card](https://goreportcard.com/badge/github.com/openzipkin/zipkin-go-opentracing)](https://goreportcard.com/report/github.com/openzipkin/zipkin-go-opentracing) --[![Sourcegraph](https://sourcegraph.com/github.com/openzipkin/zipkin-go-opentracing/-/badge.svg)](https://sourcegraph.com/github.com/openzipkin/zipkin-go-opentracing?badge) -- --[OpenTracing](http://opentracing.io) Tracer implementation for [Zipkin](http://zipkin.io) in Go. -- --### Notes -- --This package is a low level tracing ""driver"" to allow OpenTracing API consumers --to use Zipkin as their tracing backend. For details on how to work with spans --and traces we suggest looking at the documentation and README from the --[OpenTracing API](https://github.com/opentracing/opentracing-go). -- --For developers interested in adding Zipkin tracing to their Go services we --suggest looking at [Go kit](https://gokit.io) which is an excellent toolkit to --instrument your distributed system with Zipkin and much more with clean --separation of domains like transport, middleware / instrumentation and --business logic. -- --### Examples -- --For more information on zipkin-go-opentracing, please see the --[examples](https://github.com/openzipkin/zipkin-go-opentracing/tree/master/examples) --directory for usage examples as well as documentation at --[go doc](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing). 
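Before its removal below, the vendored zipkin-go-opentracing package exposed an HTTP span collector configured through functional options (see collector-http.go further down in this diff). A minimal sketch of constructing one follows; the endpoint URL and logging destination are placeholders, while the constructors and options themselves come from the removed files.

```go
package main

import (
	"log"
	"os"
	"time"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	// Wrap a standard library logger in the package's Logger interface
	// (LogWrapper is defined in logger.go, removed further down in this diff).
	logger := zipkintracer.LogWrapper(log.New(os.Stderr, "zipkin: ", log.LstdFlags))

	// The endpoint URL is hypothetical; point it at a reachable Zipkin v1 API.
	collector, err := zipkintracer.NewHTTPCollector(
		"http://zipkin.example.com:9411/api/v1/spans",
		zipkintracer.HTTPLogger(logger),
		zipkintracer.HTTPBatchSize(50),
		zipkintracer.HTTPBatchInterval(2*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer collector.Close()

	// A tracer built on this collector would hand finished spans to Collect();
	// tracer construction is outside the portion of the diff shown here.
}
```

NewHTTPCollector, HTTPLogger, HTTPBatchSize, HTTPBatchInterval and LogWrapper are all defined in the files deleted by this diff; only the endpoint and logger destination are assumed.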
-diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/circle.yml b/vendor/github.com/openzipkin/zipkin-go-opentracing/circle.yml -deleted file mode 100644 -index 30d5ef4b6a8e4..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/circle.yml -+++ /dev/null -@@ -1,10 +0,0 @@ --dependencies: -- override: -- - sudo rm -rf /home/ubuntu/.go_workspace/src/github.com/openzipkin -- - mkdir -p /home/ubuntu/.go_workspace/src/github.com/openzipkin -- - mv /home/ubuntu/zipkin-go-opentracing /home/ubuntu/.go_workspace/src/github.com/openzipkin -- - ln -s /home/ubuntu/.go_workspace/src/github.com/openzipkin/zipkin-go-opentracing /home/ubuntu/zipkin-go-opentracing -- - go get -u -t -v github.com/openzipkin/zipkin-go-opentracing/... --test: -- override: -- - make test bench -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go -deleted file mode 100644 -index 6fb7b8c7ae25e..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go -+++ /dev/null -@@ -1,234 +0,0 @@ --package zipkintracer -- --import ( -- ""bytes"" -- ""net/http"" -- ""sync"" -- ""time"" -- -- ""github.com/apache/thrift/lib/go/thrift"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --// Default timeout for http request in seconds --const defaultHTTPTimeout = time.Second * 5 -- --// defaultBatchInterval in seconds --const defaultHTTPBatchInterval = 1 -- --const defaultHTTPBatchSize = 100 -- --const defaultHTTPMaxBacklog = 1000 -- --// HTTPCollector implements Collector by forwarding spans to a http server. --type HTTPCollector struct { -- logger Logger -- url string -- client *http.Client -- batchInterval time.Duration -- batchSize int -- maxBacklog int -- batch []*zipkincore.Span -- spanc chan *zipkincore.Span -- quit chan struct{} -- shutdown chan error -- sendMutex *sync.Mutex -- batchMutex *sync.Mutex -- reqCallback RequestCallback --} -- --// RequestCallback receives the initialized request from the Collector before --// sending it over the wire. This allows one to plug in additional headers or --// do other customization. --type RequestCallback func(*http.Request) -- --// HTTPOption sets a parameter for the HttpCollector --type HTTPOption func(c *HTTPCollector) -- --// HTTPLogger sets the logger used to report errors in the collection --// process. By default, a no-op logger is used, i.e. no errors are logged --// anywhere. It's important to set this option in a production service. --func HTTPLogger(logger Logger) HTTPOption { -- return func(c *HTTPCollector) { c.logger = logger } --} -- --// HTTPTimeout sets maximum timeout for http request. --func HTTPTimeout(duration time.Duration) HTTPOption { -- return func(c *HTTPCollector) { c.client.Timeout = duration } --} -- --// HTTPBatchSize sets the maximum batch size, after which a collect will be --// triggered. The default batch size is 100 traces. --func HTTPBatchSize(n int) HTTPOption { -- return func(c *HTTPCollector) { c.batchSize = n } --} -- --// HTTPMaxBacklog sets the maximum backlog size, --// when batch size reaches this threshold, spans from the --// beginning of the batch will be disposed --func HTTPMaxBacklog(n int) HTTPOption { -- return func(c *HTTPCollector) { c.maxBacklog = n } --} -- --// HTTPBatchInterval sets the maximum duration we will buffer traces before --// emitting them to the collector. The default batch interval is 1 second. 
--func HTTPBatchInterval(d time.Duration) HTTPOption { -- return func(c *HTTPCollector) { c.batchInterval = d } --} -- --// HTTPClient sets a custom http client to use. --func HTTPClient(client *http.Client) HTTPOption { -- return func(c *HTTPCollector) { c.client = client } --} -- --// HTTPRequestCallback registers a callback function to adjust the collector --// *http.Request before it sends the request to Zipkin. --func HTTPRequestCallback(rc RequestCallback) HTTPOption { -- return func(c *HTTPCollector) { c.reqCallback = rc } --} -- --// NewHTTPCollector returns a new HTTP-backend Collector. url should be a http --// url for handle post request. timeout is passed to http client. queueSize control --// the maximum size of buffer of async queue. The logger is used to log errors, --// such as send failures; --func NewHTTPCollector(url string, options ...HTTPOption) (Collector, error) { -- c := &HTTPCollector{ -- logger: NewNopLogger(), -- url: url, -- client: &http.Client{Timeout: defaultHTTPTimeout}, -- batchInterval: defaultHTTPBatchInterval * time.Second, -- batchSize: defaultHTTPBatchSize, -- maxBacklog: defaultHTTPMaxBacklog, -- batch: []*zipkincore.Span{}, -- spanc: make(chan *zipkincore.Span), -- quit: make(chan struct{}, 1), -- shutdown: make(chan error, 1), -- sendMutex: &sync.Mutex{}, -- batchMutex: &sync.Mutex{}, -- } -- -- for _, option := range options { -- option(c) -- } -- -- go c.loop() -- return c, nil --} -- --// Collect implements Collector. --func (c *HTTPCollector) Collect(s *zipkincore.Span) error { -- c.spanc <- s -- return nil --} -- --// Close implements Collector. --func (c *HTTPCollector) Close() error { -- close(c.quit) -- return <-c.shutdown --} -- --func httpSerialize(spans []*zipkincore.Span) *bytes.Buffer { -- t := thrift.NewTMemoryBuffer() -- p := thrift.NewTBinaryProtocolTransport(t) -- if err := p.WriteListBegin(thrift.STRUCT, len(spans)); err != nil { -- panic(err) -- } -- for _, s := range spans { -- if err := s.Write(p); err != nil { -- panic(err) -- } -- } -- if err := p.WriteListEnd(); err != nil { -- panic(err) -- } -- return t.Buffer --} -- --func (c *HTTPCollector) loop() { -- var ( -- nextSend = time.Now().Add(c.batchInterval) -- ticker = time.NewTicker(c.batchInterval / 10) -- tickc = ticker.C -- ) -- defer ticker.Stop() -- -- for { -- select { -- case span := <-c.spanc: -- currentBatchSize := c.append(span) -- if currentBatchSize >= c.batchSize { -- nextSend = time.Now().Add(c.batchInterval) -- go c.send() -- } -- case <-tickc: -- if time.Now().After(nextSend) { -- nextSend = time.Now().Add(c.batchInterval) -- go c.send() -- } -- case <-c.quit: -- c.shutdown <- c.send() -- return -- } -- } --} -- --func (c *HTTPCollector) append(span *zipkincore.Span) (newBatchSize int) { -- c.batchMutex.Lock() -- defer c.batchMutex.Unlock() -- -- c.batch = append(c.batch, span) -- if len(c.batch) > c.maxBacklog { -- dispose := len(c.batch) - c.maxBacklog -- c.logger.Log(""msg"", ""backlog too long, disposing spans."", ""count"", dispose) -- c.batch = c.batch[dispose:] -- } -- newBatchSize = len(c.batch) -- return --} -- --func (c *HTTPCollector) send() error { -- // in order to prevent sending the same batch twice -- c.sendMutex.Lock() -- defer c.sendMutex.Unlock() -- -- // Select all current spans in the batch to be sent -- c.batchMutex.Lock() -- sendBatch := c.batch[:] -- c.batchMutex.Unlock() -- -- // Do not send an empty batch -- if len(sendBatch) == 0 { -- return nil -- } -- -- req, err := http.NewRequest( -- ""POST"", -- c.url, -- 
httpSerialize(sendBatch)) -- if err != nil { -- c.logger.Log(""err"", err.Error()) -- return err -- } -- req.Header.Set(""Content-Type"", ""application/x-thrift"") -- if c.reqCallback != nil { -- c.reqCallback(req) -- } -- resp, err := c.client.Do(req) -- if err != nil { -- c.logger.Log(""err"", err.Error()) -- return err -- } -- resp.Body.Close() -- // non 2xx code -- if resp.StatusCode < 200 || resp.StatusCode >= 300 { -- c.logger.Log(""err"", ""HTTP POST span failed"", ""code"", resp.Status) -- } -- -- // Remove sent spans from the batch -- c.batchMutex.Lock() -- c.batch = c.batch[len(sendBatch):] -- c.batchMutex.Unlock() -- -- return nil --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go -deleted file mode 100644 -index eb18c3f36438f..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go -+++ /dev/null -@@ -1,95 +0,0 @@ --package zipkintracer -- --import ( -- ""github.com/Shopify/sarama"" -- ""github.com/apache/thrift/lib/go/thrift"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --// defaultKafkaTopic sets the standard Kafka topic our Collector will publish --// on. The default topic for zipkin-receiver-kafka is ""zipkin"", see: --// https://github.com/openzipkin/zipkin/tree/master/zipkin-receiver-kafka --const defaultKafkaTopic = ""zipkin"" -- --// KafkaCollector implements Collector by publishing spans to a Kafka --// broker. --type KafkaCollector struct { -- producer sarama.AsyncProducer -- logger Logger -- topic string --} -- --// KafkaOption sets a parameter for the KafkaCollector --type KafkaOption func(c *KafkaCollector) -- --// KafkaLogger sets the logger used to report errors in the collection --// process. By default, a no-op logger is used, i.e. no errors are logged --// anywhere. It's important to set this option. --func KafkaLogger(logger Logger) KafkaOption { -- return func(c *KafkaCollector) { c.logger = logger } --} -- --// KafkaProducer sets the producer used to produce to Kafka. --func KafkaProducer(p sarama.AsyncProducer) KafkaOption { -- return func(c *KafkaCollector) { c.producer = p } --} -- --// KafkaTopic sets the kafka topic to attach the collector producer on. --func KafkaTopic(t string) KafkaOption { -- return func(c *KafkaCollector) { c.topic = t } --} -- --// NewKafkaCollector returns a new Kafka-backed Collector. addrs should be a --// slice of TCP endpoints of the form ""host:port"". --func NewKafkaCollector(addrs []string, options ...KafkaOption) (Collector, error) { -- c := &KafkaCollector{ -- logger: NewNopLogger(), -- topic: defaultKafkaTopic, -- } -- -- for _, option := range options { -- option(c) -- } -- if c.producer == nil { -- p, err := sarama.NewAsyncProducer(addrs, nil) -- if err != nil { -- return nil, err -- } -- c.producer = p -- } -- -- go c.logErrors() -- -- return c, nil --} -- --func (c *KafkaCollector) logErrors() { -- for pe := range c.producer.Errors() { -- _ = c.logger.Log(""msg"", pe.Msg, ""err"", pe.Err, ""result"", ""failed to produce msg"") -- } --} -- --// Collect implements Collector. --func (c *KafkaCollector) Collect(s *zipkincore.Span) error { -- c.producer.Input() <- &sarama.ProducerMessage{ -- Topic: c.topic, -- Key: nil, -- Value: sarama.ByteEncoder(kafkaSerialize(s)), -- } -- return nil --} -- --// Close implements Collector. 
--func (c *KafkaCollector) Close() error { -- return c.producer.Close() --} -- --func kafkaSerialize(s *zipkincore.Span) []byte { -- t := thrift.NewTMemoryBuffer() -- p := thrift.NewTBinaryProtocolTransport(t) -- if err := s.Write(p); err != nil { -- panic(err) -- } -- return t.Buffer.Bytes() --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go -deleted file mode 100644 -index 1ff353aa07b4b..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go -+++ /dev/null -@@ -1,235 +0,0 @@ --package zipkintracer -- --import ( -- ""context"" -- ""encoding/base64"" -- ""fmt"" -- ""net"" -- ""sync"" -- ""time"" -- -- ""github.com/apache/thrift/lib/go/thrift"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe"" -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --const defaultScribeCategory = ""zipkin"" -- --// defaultScribeBatchInterval in seconds --const defaultScribeBatchInterval = 1 -- --const defaultScribeBatchSize = 100 -- --const defaultScribeMaxBacklog = 1000 -- --// ScribeCollector implements Collector by forwarding spans to a Scribe --// service, in batches. --type ScribeCollector struct { -- logger Logger -- category string -- factory func() (scribe.Scribe, error) -- client scribe.Scribe -- batchInterval time.Duration -- batchSize int -- maxBacklog int -- batch []*scribe.LogEntry -- spanc chan *zipkincore.Span -- quit chan struct{} -- shutdown chan error -- sendMutex *sync.Mutex -- batchMutex *sync.Mutex --} -- --// ScribeOption sets a parameter for the StdlibAdapter. --type ScribeOption func(s *ScribeCollector) -- --// ScribeLogger sets the logger used to report errors in the collection --// process. By default, a no-op logger is used, i.e. no errors are logged --// anywhere. It's important to set this option in a production service. --func ScribeLogger(logger Logger) ScribeOption { -- return func(s *ScribeCollector) { s.logger = logger } --} -- --// ScribeBatchSize sets the maximum batch size, after which a collect will be --// triggered. The default batch size is 100 traces. --func ScribeBatchSize(n int) ScribeOption { -- return func(s *ScribeCollector) { s.batchSize = n } --} -- --// ScribeMaxBacklog sets the maximum backlog size, --// when batch size reaches this threshold, spans from the --// beginning of the batch will be disposed --func ScribeMaxBacklog(n int) ScribeOption { -- return func(c *ScribeCollector) { c.maxBacklog = n } --} -- --// ScribeBatchInterval sets the maximum duration we will buffer traces before --// emitting them to the collector. The default batch interval is 1 second. --func ScribeBatchInterval(d time.Duration) ScribeOption { -- return func(s *ScribeCollector) { s.batchInterval = d } --} -- --// ScribeCategory sets the Scribe category used to transmit the spans. --func ScribeCategory(category string) ScribeOption { -- return func(s *ScribeCollector) { s.category = category } --} -- --// NewScribeCollector returns a new Scribe-backed Collector. addr should be a --// TCP endpoint of the form ""host:port"". timeout is passed to the Thrift dial --// function NewTSocketFromAddrTimeout. batchSize and batchInterval control the --// maximum size and interval of a batch of spans; as soon as either limit is --// reached, the batch is sent. The logger is used to log errors, such as batch --// send failures; users should provide an appropriate context, if desired. 
--func NewScribeCollector(addr string, timeout time.Duration, options ...ScribeOption) (Collector, error) { -- factory := scribeClientFactory(addr, timeout) -- client, err := factory() -- if err != nil { -- return nil, err -- } -- c := &ScribeCollector{ -- logger: NewNopLogger(), -- category: defaultScribeCategory, -- factory: factory, -- client: client, -- batchInterval: defaultScribeBatchInterval * time.Second, -- batchSize: defaultScribeBatchSize, -- maxBacklog: defaultScribeMaxBacklog, -- batch: []*scribe.LogEntry{}, -- spanc: make(chan *zipkincore.Span), -- quit: make(chan struct{}), -- shutdown: make(chan error, 1), -- sendMutex: &sync.Mutex{}, -- batchMutex: &sync.Mutex{}, -- } -- -- for _, option := range options { -- option(c) -- } -- -- go c.loop() -- return c, nil --} -- --// Collect implements Collector. --func (c *ScribeCollector) Collect(s *zipkincore.Span) error { -- c.spanc <- s -- return nil // accepted --} -- --// Close implements Collector. --func (c *ScribeCollector) Close() error { -- close(c.quit) -- return <-c.shutdown --} -- --func scribeSerialize(s *zipkincore.Span) string { -- t := thrift.NewTMemoryBuffer() -- p := thrift.NewTBinaryProtocolTransport(t) -- if err := s.Write(p); err != nil { -- panic(err) -- } -- return base64.StdEncoding.EncodeToString(t.Buffer.Bytes()) --} -- --func (c *ScribeCollector) loop() { -- var ( -- nextSend = time.Now().Add(c.batchInterval) -- ticker = time.NewTicker(c.batchInterval / 10) -- tickc = ticker.C -- ) -- defer ticker.Stop() -- -- for { -- select { -- case span := <-c.spanc: -- currentBatchSize := c.append(span) -- if currentBatchSize >= c.batchSize { -- nextSend = time.Now().Add(c.batchInterval) -- go c.send() -- } -- case <-tickc: -- if time.Now().After(nextSend) { -- nextSend = time.Now().Add(c.batchInterval) -- go c.send() -- } -- case <-c.quit: -- c.shutdown <- c.send() -- return -- } -- } --} -- --func (c *ScribeCollector) append(span *zipkincore.Span) (newBatchSize int) { -- c.batchMutex.Lock() -- defer c.batchMutex.Unlock() -- -- c.batch = append(c.batch, &scribe.LogEntry{ -- Category: c.category, -- Message: scribeSerialize(span), -- }) -- if len(c.batch) > c.maxBacklog { -- dispose := len(c.batch) - c.maxBacklog -- c.logger.Log(""Backlog too long, disposing spans."", ""count"", dispose) -- c.batch = c.batch[dispose:] -- } -- newBatchSize = len(c.batch) -- return --} -- --func (c *ScribeCollector) send() error { -- // in order to prevent sending the same batch twice -- c.sendMutex.Lock() -- defer c.sendMutex.Unlock() -- -- // Select all current spans in the batch to be sent -- c.batchMutex.Lock() -- sendBatch := c.batch[:] -- c.batchMutex.Unlock() -- -- // Do not send an empty batch -- if len(sendBatch) == 0 { -- return nil -- } -- -- if c.client == nil { -- var err error -- if c.client, err = c.factory(); err != nil { -- _ = c.logger.Log(""err"", fmt.Sprintf(""during reconnect: %v"", err)) -- return err -- } -- } -- if rc, err := c.client.Log(context.Background(), sendBatch); err != nil { -- c.client = nil -- _ = c.logger.Log(""err"", fmt.Sprintf(""during Log: %v"", err)) -- return err -- } else if rc != scribe.ResultCode_OK { -- // probably transient error; don't reset client -- _ = c.logger.Log(""err"", fmt.Sprintf(""remote returned %s"", rc)) -- } -- -- // Remove sent spans from the batch -- c.batchMutex.Lock() -- c.batch = c.batch[len(sendBatch):] -- c.batchMutex.Unlock() -- -- return nil --} -- --func scribeClientFactory(addr string, timeout time.Duration) func() (scribe.Scribe, error) { -- return func() 
(scribe.Scribe, error) { -- a, err := net.ResolveTCPAddr(""tcp"", addr) -- if err != nil { -- return nil, err -- } -- socket := thrift.NewTSocketFromAddrTimeout(a, timeout) -- transport := thrift.NewTFramedTransport(socket) -- if err := transport.Open(); err != nil { -- _ = socket.Close() -- return nil, err -- } -- proto := thrift.NewTBinaryProtocolTransport(transport) -- client := scribe.NewScribeClientProtocol(transport, proto, proto) -- return client, nil -- } --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go -deleted file mode 100644 -index f8cfb58e3a241..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go -+++ /dev/null -@@ -1,77 +0,0 @@ --package zipkintracer -- --import ( -- ""strings"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --// Collector represents a Zipkin trace collector, which is probably a set of --// remote endpoints. --type Collector interface { -- Collect(*zipkincore.Span) error -- Close() error --} -- --// NopCollector implements Collector but performs no work. --type NopCollector struct{} -- --// Collect implements Collector. --func (NopCollector) Collect(*zipkincore.Span) error { return nil } -- --// Close implements Collector. --func (NopCollector) Close() error { return nil } -- --// MultiCollector implements Collector by sending spans to all collectors. --type MultiCollector []Collector -- --// Collect implements Collector. --func (c MultiCollector) Collect(s *zipkincore.Span) error { -- return c.aggregateErrors(func(coll Collector) error { return coll.Collect(s) }) --} -- --// Close implements Collector. --func (c MultiCollector) Close() error { -- return c.aggregateErrors(func(coll Collector) error { return coll.Close() }) --} -- --func (c MultiCollector) aggregateErrors(f func(c Collector) error) error { -- var e *collectionError -- for i, collector := range c { -- if err := f(collector); err != nil { -- if e == nil { -- e = &collectionError{ -- errs: make([]error, len(c)), -- } -- } -- e.errs[i] = err -- } -- } -- return e --} -- --// CollectionError represents an array of errors returned by one or more --// failed Collector methods. --type CollectionError interface { -- Error() string -- GetErrors() []error --} -- --type collectionError struct { -- errs []error --} -- --func (c *collectionError) Error() string { -- errs := []string{} -- for _, err := range c.errs { -- if err != nil { -- errs = append(errs, err.Error()) -- } -- } -- return strings.Join(errs, ""; "") --} -- --// GetErrors implements CollectionError --func (c *collectionError) GetErrors() []error { -- return c.errs --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go -deleted file mode 100644 -index e9fe299118fc0..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go -+++ /dev/null -@@ -1,61 +0,0 @@ --package zipkintracer -- --import ( -- ""github.com/openzipkin/zipkin-go-opentracing/flag"" -- ""github.com/openzipkin/zipkin-go-opentracing/types"" --) -- --// SpanContext holds the basic Span metadata. --type SpanContext struct { -- // A probabilistically unique identifier for a [multi-span] trace. -- TraceID types.TraceID -- -- // A probabilistically unique identifier for a span. -- SpanID uint64 -- -- // Whether the trace is sampled. -- Sampled bool -- -- // The span's associated baggage. 
-- Baggage map[string]string // initialized on first use -- -- // The SpanID of this Context's parent, or nil if there is no parent. -- ParentSpanID *uint64 -- -- // Flags provides the ability to create and communicate feature flags. -- Flags flag.Flags -- -- // Whether the span is owned by the current process -- Owner bool --} -- --// ForeachBaggageItem belongs to the opentracing.SpanContext interface --func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { -- for k, v := range c.Baggage { -- if !handler(k, v) { -- break -- } -- } --} -- --// WithBaggageItem returns an entirely new basictracer SpanContext with the --// given key:value baggage pair set. --func (c SpanContext) WithBaggageItem(key, val string) SpanContext { -- var newBaggage map[string]string -- if c.Baggage == nil { -- newBaggage = map[string]string{key: val} -- } else { -- newBaggage = make(map[string]string, len(c.Baggage)+1) -- for k, v := range c.Baggage { -- newBaggage[k] = v -- } -- newBaggage[key] = val -- } -- var parentSpanID *uint64 -- if c.ParentSpanID != nil { -- parentSpanID = new(uint64) -- *parentSpanID = *c.ParentSpanID -- } -- // Use positional parameters so the compiler will help catch new fields. -- return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage, parentSpanID, c.Flags, c.Owner} --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go -deleted file mode 100644 -index 1ee00c8a689dc..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go -+++ /dev/null -@@ -1,78 +0,0 @@ --package zipkintracer -- --import ( -- ""bytes"" -- ""fmt"" -- ""runtime"" -- ""strconv"" -- ""sync"" --) -- --const debugGoroutineIDTag = ""_initial_goroutine"" -- --type errAssertionFailed struct { -- span *spanImpl -- msg string --} -- --// Error implements the error interface. --func (err *errAssertionFailed) Error() string { -- return fmt.Sprintf(""%s:\n%+v"", err.msg, err.span) --} -- --func (s *spanImpl) Lock() { -- s.Mutex.Lock() -- s.maybeAssertSanityLocked() --} -- --func (s *spanImpl) maybeAssertSanityLocked() { -- if s.tracer == nil { -- s.Mutex.Unlock() -- panic(&errAssertionFailed{span: s, msg: ""span used after call to Finish()""}) -- } -- if s.tracer.options.debugAssertSingleGoroutine { -- startID := curGoroutineID() -- curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64) -- if !ok { -- // This is likely invoked in the context of the SetTag which sets -- // debugGoroutineTag. 
-- return -- } -- if startID != curID { -- s.Mutex.Unlock() -- panic(&errAssertionFailed{ -- span: s, -- msg: fmt.Sprintf(""span started on goroutine %d, but now running on %d"", startID, curID), -- }) -- } -- } --} -- --var goroutineSpace = []byte(""goroutine "") --var littleBuf = sync.Pool{ -- New: func() interface{} { -- buf := make([]byte, 64) -- return &buf -- }, --} -- --// Credit to @bradfitz: --// https://github.com/golang/net/blob/master/http2/gotrack.go#L51 --func curGoroutineID() uint64 { -- bp := littleBuf.Get().(*[]byte) -- defer littleBuf.Put(bp) -- b := *bp -- b = b[:runtime.Stack(b, false)] -- // Parse the 4707 out of ""goroutine 4707 ["" -- b = bytes.TrimPrefix(b, goroutineSpace) -- i := bytes.IndexByte(b, ' ') -- if i < 0 { -- panic(fmt.Sprintf(""No space found in %q"", b)) -- } -- b = b[:i] -- n, err := strconv.ParseUint(string(b), 10, 64) -- if err != nil { -- panic(fmt.Sprintf(""Failed to parse goroutine ID out of %q: %v"", b, err)) -- } -- return n --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go -deleted file mode 100644 -index 31b6a009eddb7..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go -+++ /dev/null -@@ -1,62 +0,0 @@ --package zipkintracer -- --import ""github.com/opentracing/opentracing-go"" -- --// A SpanEvent is emitted when a mutating command is called on a Span. --type SpanEvent interface{} -- --// EventCreate is emitted when a Span is created. --type EventCreate struct{ OperationName string } -- --// EventTag is received when SetTag is called. --type EventTag struct { -- Key string -- Value interface{} --} -- --// EventBaggage is received when SetBaggageItem is called. --type EventBaggage struct { -- Key, Value string --} -- --// EventLogFields is received when LogFields or LogKV is called. --type EventLogFields opentracing.LogRecord -- --// EventLog is received when Log (or one of its derivatives) is called. --// --// DEPRECATED --type EventLog opentracing.LogData -- --// EventFinish is received when Finish is called. --type EventFinish RawSpan -- --func (s *spanImpl) onCreate(opName string) { -- if s.event != nil { -- s.event(EventCreate{OperationName: opName}) -- } --} --func (s *spanImpl) onTag(key string, value interface{}) { -- if s.event != nil { -- s.event(EventTag{Key: key, Value: value}) -- } --} --func (s *spanImpl) onLog(ld opentracing.LogData) { -- if s.event != nil { -- s.event(EventLog(ld)) -- } --} --func (s *spanImpl) onLogFields(lr opentracing.LogRecord) { -- if s.event != nil { -- s.event(EventLogFields(lr)) -- } --} --func (s *spanImpl) onBaggage(key, value string) { -- if s.event != nil { -- s.event(EventBaggage{Key: key, Value: value}) -- } --} --func (s *spanImpl) onFinish(sp RawSpan) { -- if s.event != nil { -- s.event(EventFinish(sp)) -- } --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go -deleted file mode 100644 -index 05cb10ea39156..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go -+++ /dev/null -@@ -1,39 +0,0 @@ --package flag -- --// Flags provides the ability to create and communicate feature flags. --type Flags uint64 -- --// Flags is a bitset --const ( -- Debug Flags = 1 << 0 -- -- // All flags below deal with binaryPropagators. 
They will be discarded in the -- // textMapPropagator (not read and not set) -- -- // SamplingSet and Sampled handle Sampled tribool logic for interop with -- // instrumenting libraries / propagation channels not using a separate Sampled -- // header and potentially encoding this in flags. -- // -- // When we receive a flag we do this: -- // 1. Sampled bit is set => true -- // 2. Sampled bit is not set => inspect SamplingSet bit. -- // 2a. SamplingSet bit is set => false -- // 2b. SamplingSet bit is not set => null -- // Note on 2b.: depending on the propagator having a separate Sampled header -- // we either assume Sampling is false or unknown. In the latter case we will -- // run our sampler even though we are not the root of the trace. -- // -- // When propagating to a downstream service we will always be explicit and -- // will provide a set SamplingSet bit in case of our binary propagator either -- SamplingSet Flags = 1 << 1 -- Sampled Flags = 1 << 2 -- // When set, we can ignore the value of the parentId. This is used for binary -- // fixed width transports or transports like proto3 that return a default -- // value if a value has not been set (thus not enabling you to distinguish -- // between the value being set to the default or not set at all). -- // -- // While many zipkin systems re-use a trace id as the root span id, we know -- // that some don't. With this flag, we can tell for sure if the span is root -- // as opposed to the convention trace id == span id == parent id. -- IsRoot Flags = 1 << 3 --) -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go -deleted file mode 100644 -index f5695e0e27c44..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go -+++ /dev/null -@@ -1,113 +0,0 @@ --// Copyright (c) 2016 Uber Technologies, Inc. --// Copyright (c) 2016 Bas van Beek -- --// Permission is hereby granted, free of charge, to any person obtaining a copy --// of this software and associated documentation files (the ""Software""), to deal --// in the Software without restriction, including without limitation the rights --// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell --// copies of the Software, and to permit persons to whom the Software is --// furnished to do so, subject to the following conditions: --// --// The above copyright notice and this permission notice shall be included in --// all copies or substantial portions of the Software. --// --// THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER --// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, --// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN --// THE SOFTWARE. 
-- --package zipkintracer -- --import ( -- ""bytes"" -- ""encoding/json"" -- ""errors"" -- ""fmt"" -- -- ""github.com/go-logfmt/logfmt"" -- ""github.com/opentracing/opentracing-go/log"" --) -- --var errEventLogNotFound = errors.New(""event log field not found"") -- --type fieldsAsMap map[string]string -- --// MaterializeWithJSON converts log Fields into JSON string --func MaterializeWithJSON(logFields []log.Field) ([]byte, error) { -- fields := fieldsAsMap(make(map[string]string, len(logFields))) -- for _, field := range logFields { -- field.Marshal(fields) -- } -- return json.Marshal(fields) --} -- --// MaterializeWithLogFmt converts log Fields into LogFmt string --func MaterializeWithLogFmt(logFields []log.Field) ([]byte, error) { -- var ( -- buffer = bytes.NewBuffer(nil) -- encoder = logfmt.NewEncoder(buffer) -- ) -- for _, field := range logFields { -- if err := encoder.EncodeKeyval(field.Key(), field.Value()); err != nil { -- encoder.EncodeKeyval(field.Key(), err.Error()) -- } -- } -- return buffer.Bytes(), nil --} -- --// StrictZipkinMaterializer will only record a log.Field of type ""event"". --func StrictZipkinMaterializer(logFields []log.Field) ([]byte, error) { -- for _, field := range logFields { -- if field.Key() == ""event"" { -- return []byte(fmt.Sprintf(""%+v"", field.Value())), nil -- } -- } -- return nil, errEventLogNotFound --} -- --func (ml fieldsAsMap) EmitString(key, value string) { -- ml[key] = value --} -- --func (ml fieldsAsMap) EmitBool(key string, value bool) { -- ml[key] = fmt.Sprintf(""%t"", value) --} -- --func (ml fieldsAsMap) EmitInt(key string, value int) { -- ml[key] = fmt.Sprintf(""%d"", value) --} -- --func (ml fieldsAsMap) EmitInt32(key string, value int32) { -- ml[key] = fmt.Sprintf(""%d"", value) --} -- --func (ml fieldsAsMap) EmitInt64(key string, value int64) { -- ml[key] = fmt.Sprintf(""%d"", value) --} -- --func (ml fieldsAsMap) EmitUint32(key string, value uint32) { -- ml[key] = fmt.Sprintf(""%d"", value) --} -- --func (ml fieldsAsMap) EmitUint64(key string, value uint64) { -- ml[key] = fmt.Sprintf(""%d"", value) --} -- --func (ml fieldsAsMap) EmitFloat32(key string, value float32) { -- ml[key] = fmt.Sprintf(""%f"", value) --} -- --func (ml fieldsAsMap) EmitFloat64(key string, value float64) { -- ml[key] = fmt.Sprintf(""%f"", value) --} -- --func (ml fieldsAsMap) EmitObject(key string, value interface{}) { -- ml[key] = fmt.Sprintf(""%+v"", value) --} -- --func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) { -- value(ml) --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go -deleted file mode 100644 -index 643f653589aa8..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go -+++ /dev/null -@@ -1,64 +0,0 @@ --package zipkintracer -- --import ( -- ""errors"" -- ""fmt"" -- ""log"" -- ""strings"" --) -- --// ErrMissingValue adds a Missing Value Error when the Logging Parameters are --// not even in number --var ErrMissingValue = errors.New(""(MISSING)"") -- --// Logger is the fundamental interface for all log operations. Log creates a --// log event from keyvals, a variadic sequence of alternating keys and values. --// The signature is compatible with the Go kit log package. --type Logger interface { -- Log(keyvals ...interface{}) error --} -- --// NewNopLogger provides a Logger that discards all Log data sent to it. 
--func NewNopLogger() Logger { -- return &nopLogger{} --} -- --// LogWrapper wraps a standard library logger into a Logger compatible with this --// package. --func LogWrapper(l *log.Logger) Logger { -- return &wrappedLogger{l: l} --} -- --// wrappedLogger implements Logger --type wrappedLogger struct { -- l *log.Logger --} -- --// Log implements Logger --func (l *wrappedLogger) Log(k ...interface{}) error { -- if len(k)%2 == 1 { -- k = append(k, ErrMissingValue) -- } -- o := make([]string, len(k)/2) -- for i := 0; i < len(k); i += 2 { -- o[i/2] = fmt.Sprintf(""%s=%q"", k[i], k[i+1]) -- } -- l.l.Println(strings.Join(o, "" "")) -- return nil --} -- --// nopLogger implements Logger --type nopLogger struct{} -- --// Log implements Logger --func (*nopLogger) Log(_ ...interface{}) error { return nil } -- --// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If --// f is a function with the appropriate signature, LoggerFunc(f) is a Logger --// object that calls f. --type LoggerFunc func(...interface{}) error -- --// Log implements Logger by calling f(keyvals...). --func (f LoggerFunc) Log(keyvals ...interface{}) error { -- return f(keyvals...) --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go -deleted file mode 100644 -index f46ff011a8256..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go -+++ /dev/null -@@ -1,52 +0,0 @@ --package zipkintracer -- --import ( -- otobserver ""github.com/opentracing-contrib/go-observer"" -- opentracing ""github.com/opentracing/opentracing-go"" --) -- --// observer is a dispatcher to other observers --type observer struct { -- observers []otobserver.Observer --} -- --// spanObserver is a dispatcher to other span observers --type spanObserver struct { -- observers []otobserver.SpanObserver --} -- --func (o observer) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (otobserver.SpanObserver, bool) { -- var spanObservers []otobserver.SpanObserver -- for _, obs := range o.observers { -- spanObs, ok := obs.OnStartSpan(sp, operationName, options) -- if ok { -- if spanObservers == nil { -- spanObservers = make([]otobserver.SpanObserver, 0, len(o.observers)) -- } -- spanObservers = append(spanObservers, spanObs) -- } -- } -- if len(spanObservers) == 0 { -- return nil, false -- } -- -- return spanObserver{observers: spanObservers}, true --} -- --func (o spanObserver) OnSetOperationName(operationName string) { -- for _, obs := range o.observers { -- obs.OnSetOperationName(operationName) -- } --} -- --func (o spanObserver) OnSetTag(key string, value interface{}) { -- for _, obs := range o.observers { -- obs.OnSetTag(key, value) -- } --} -- --func (o spanObserver) OnFinish(options opentracing.FinishOptions) { -- for _, obs := range o.observers { -- obs.OnFinish(options) -- } --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go -deleted file mode 100644 -index 56d2d5aa3db91..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go -+++ /dev/null -@@ -1,68 +0,0 @@ --package zipkintracer -- --import ( -- opentracing ""github.com/opentracing/opentracing-go"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/flag"" -- ""github.com/openzipkin/zipkin-go-opentracing/types"" --) -- --type accessorPropagator struct { -- tracer *tracerImpl --} -- --// DelegatingCarrier 
is a flexible carrier interface which can be implemented --// by types which have a means of storing the trace metadata and already know --// how to serialize themselves (for example, protocol buffers). --type DelegatingCarrier interface { -- SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) -- State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) -- SetBaggageItem(key, value string) -- GetBaggage(func(key, value string)) --} -- --func (p *accessorPropagator) Inject( -- spanContext opentracing.SpanContext, -- carrier interface{}, --) error { -- dc, ok := carrier.(DelegatingCarrier) -- if !ok || dc == nil { -- return opentracing.ErrInvalidCarrier -- } -- sc, ok := spanContext.(SpanContext) -- if !ok { -- return opentracing.ErrInvalidSpanContext -- } -- dc.SetState(sc.TraceID, sc.SpanID, sc.ParentSpanID, sc.Sampled, sc.Flags) -- for k, v := range sc.Baggage { -- dc.SetBaggageItem(k, v) -- } -- return nil --} -- --func (p *accessorPropagator) Extract( -- carrier interface{}, --) (opentracing.SpanContext, error) { -- dc, ok := carrier.(DelegatingCarrier) -- if !ok || dc == nil { -- return nil, opentracing.ErrInvalidCarrier -- } -- -- traceID, spanID, parentSpanID, sampled, flags := dc.State() -- sc := SpanContext{ -- TraceID: traceID, -- SpanID: spanID, -- Sampled: sampled, -- Baggage: nil, -- ParentSpanID: parentSpanID, -- Flags: flags, -- } -- dc.GetBaggage(func(k, v string) { -- if sc.Baggage == nil { -- sc.Baggage = map[string]string{} -- } -- sc.Baggage[k] = v -- }) -- -- return sc, nil --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go -deleted file mode 100644 -index 7d102f90a307a..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go -+++ /dev/null -@@ -1,257 +0,0 @@ --package zipkintracer -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""io"" -- ""strconv"" -- ""strings"" -- -- ""github.com/gogo/protobuf/proto"" -- opentracing ""github.com/opentracing/opentracing-go"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/flag"" -- ""github.com/openzipkin/zipkin-go-opentracing/types"" -- ""github.com/openzipkin/zipkin-go-opentracing/wire"" --) -- --type textMapPropagator struct { -- tracer *tracerImpl --} --type binaryPropagator struct { -- tracer *tracerImpl --} -- --const ( -- prefixTracerState = ""x-b3-"" // we default to interop with non-opentracing zipkin tracers -- prefixBaggage = ""ot-baggage-"" -- -- tracerStateFieldCount = 3 // not 5, X-B3-ParentSpanId is optional and we allow optional Sampled header -- zipkinTraceID = prefixTracerState + ""traceid"" -- zipkinSpanID = prefixTracerState + ""spanid"" -- zipkinParentSpanID = prefixTracerState + ""parentspanid"" -- zipkinSampled = prefixTracerState + ""sampled"" -- zipkinFlags = prefixTracerState + ""flags"" --) -- --func (p *textMapPropagator) Inject( -- spanContext opentracing.SpanContext, -- opaqueCarrier interface{}, --) error { -- sc, ok := spanContext.(SpanContext) -- if !ok { -- return opentracing.ErrInvalidSpanContext -- } -- carrier, ok := opaqueCarrier.(opentracing.TextMapWriter) -- if !ok { -- return opentracing.ErrInvalidCarrier -- } -- carrier.Set(zipkinTraceID, sc.TraceID.ToHex()) -- carrier.Set(zipkinSpanID, fmt.Sprintf(""%016x"", sc.SpanID)) -- if sc.Sampled { -- carrier.Set(zipkinSampled, ""1"") -- } else { -- carrier.Set(zipkinSampled, ""0"") -- } -- -- if sc.ParentSpanID != 
nil { -- // we only set ParentSpanID header if there is a parent span -- carrier.Set(zipkinParentSpanID, fmt.Sprintf(""%016x"", *sc.ParentSpanID)) -- } -- // we only need to inject the debug flag if set. see flag package for details. -- flags := sc.Flags & flag.Debug -- carrier.Set(zipkinFlags, strconv.FormatUint(uint64(flags), 10)) -- -- for k, v := range sc.Baggage { -- carrier.Set(prefixBaggage+k, v) -- } -- return nil --} -- --func (p *textMapPropagator) Extract( -- opaqueCarrier interface{}, --) (opentracing.SpanContext, error) { -- carrier, ok := opaqueCarrier.(opentracing.TextMapReader) -- if !ok { -- return nil, opentracing.ErrInvalidCarrier -- } -- requiredFieldCount := 0 -- var ( -- traceID types.TraceID -- spanID uint64 -- sampled bool -- parentSpanID *uint64 -- flags flag.Flags -- err error -- ) -- decodedBaggage := make(map[string]string) -- err = carrier.ForeachKey(func(k, v string) error { -- switch strings.ToLower(k) { -- case zipkinTraceID: -- traceID, err = types.TraceIDFromHex(v) -- if err != nil { -- return opentracing.ErrSpanContextCorrupted -- } -- case zipkinSpanID: -- spanID, err = strconv.ParseUint(v, 16, 64) -- if err != nil { -- return opentracing.ErrSpanContextCorrupted -- } -- case zipkinParentSpanID: -- var id uint64 -- id, err = strconv.ParseUint(v, 16, 64) -- if err != nil { -- return opentracing.ErrSpanContextCorrupted -- } -- parentSpanID = &id -- case zipkinSampled: -- sampled, err = strconv.ParseBool(v) -- if err != nil { -- return opentracing.ErrSpanContextCorrupted -- } -- // Sampled header was explicitly set -- flags |= flag.SamplingSet -- case zipkinFlags: -- var f uint64 -- f, err = strconv.ParseUint(v, 10, 64) -- if err != nil { -- return opentracing.ErrSpanContextCorrupted -- } -- if flag.Flags(f)&flag.Debug == flag.Debug { -- flags |= flag.Debug -- } -- default: -- lowercaseK := strings.ToLower(k) -- if strings.HasPrefix(lowercaseK, prefixBaggage) { -- decodedBaggage[strings.TrimPrefix(lowercaseK, prefixBaggage)] = v -- } -- // Balance off the requiredFieldCount++ just below... -- requiredFieldCount-- -- } -- requiredFieldCount++ -- return nil -- }) -- if err != nil { -- return nil, err -- } -- if requiredFieldCount < tracerStateFieldCount { -- if requiredFieldCount == 0 { -- return nil, opentracing.ErrSpanContextNotFound -- } -- return nil, opentracing.ErrSpanContextCorrupted -- } -- -- // check if Sample state was communicated through the Flags bitset -- if !sampled && flags&flag.Sampled == flag.Sampled { -- sampled = true -- } -- -- return SpanContext{ -- TraceID: traceID, -- SpanID: spanID, -- Sampled: sampled, -- Baggage: decodedBaggage, -- ParentSpanID: parentSpanID, -- Flags: flags, -- }, nil --} -- --func (p *binaryPropagator) Inject( -- spanContext opentracing.SpanContext, -- opaqueCarrier interface{}, --) error { -- sc, ok := spanContext.(SpanContext) -- if !ok { -- return opentracing.ErrInvalidSpanContext -- } -- carrier, ok := opaqueCarrier.(io.Writer) -- if !ok { -- return opentracing.ErrInvalidCarrier -- } -- -- state := wire.TracerState{} -- state.TraceId = sc.TraceID.Low -- state.TraceIdHigh = sc.TraceID.High -- state.SpanId = sc.SpanID -- state.Sampled = sc.Sampled -- state.BaggageItems = sc.Baggage -- -- // encode the debug bit -- flags := sc.Flags & flag.Debug -- if sc.ParentSpanID != nil { -- state.ParentSpanId = *sc.ParentSpanID -- } else { -- // root span... 
-- state.ParentSpanId = 0 -- flags |= flag.IsRoot -- } -- -- // we explicitly inform our sampling state downstream -- flags |= flag.SamplingSet -- if sc.Sampled { -- flags |= flag.Sampled -- } -- state.Flags = uint64(flags) -- -- b, err := proto.Marshal(&state) -- if err != nil { -- return err -- } -- -- // Write the length of the marshalled binary to the writer. -- length := uint32(len(b)) -- if err = binary.Write(carrier, binary.BigEndian, &length); err != nil { -- return err -- } -- -- _, err = carrier.Write(b) -- return err --} -- --func (p *binaryPropagator) Extract( -- opaqueCarrier interface{}, --) (opentracing.SpanContext, error) { -- carrier, ok := opaqueCarrier.(io.Reader) -- if !ok { -- return nil, opentracing.ErrInvalidCarrier -- } -- -- // Read the length of marshalled binary. io.ReadAll isn't that performant -- // since it keeps resizing the underlying buffer as it encounters more bytes -- // to read. By reading the length, we can allocate a fixed sized buf and read -- // the exact amount of bytes into it. -- var length uint32 -- if err := binary.Read(carrier, binary.BigEndian, &length); err != nil { -- return nil, opentracing.ErrSpanContextCorrupted -- } -- buf := make([]byte, length) -- if n, err := carrier.Read(buf); err != nil { -- if n > 0 { -- return nil, opentracing.ErrSpanContextCorrupted -- } -- return nil, opentracing.ErrSpanContextNotFound -- } -- -- ctx := wire.TracerState{} -- if err := proto.Unmarshal(buf, &ctx); err != nil { -- return nil, opentracing.ErrSpanContextCorrupted -- } -- -- flags := flag.Flags(ctx.Flags) -- if flags&flag.Sampled == flag.Sampled { -- ctx.Sampled = true -- } -- // this propagator expects sampling state to be explicitly propagated by the -- // upstream service. so set this flag to indentify to tracer it should not -- // run its sampler in case it is not the root of the trace. -- flags |= flag.SamplingSet -- -- return SpanContext{ -- TraceID: types.TraceID{Low: ctx.TraceId, High: ctx.TraceIdHigh}, -- SpanID: ctx.SpanId, -- Sampled: ctx.Sampled, -- Baggage: ctx.BaggageItems, -- ParentSpanID: &ctx.ParentSpanId, -- Flags: flags, -- }, nil --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go -deleted file mode 100644 -index 03bc15b237d52..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go -+++ /dev/null -@@ -1,30 +0,0 @@ --package zipkintracer -- --import ( -- ""time"" -- -- opentracing ""github.com/opentracing/opentracing-go"" --) -- --// RawSpan encapsulates all state associated with a (finished) Span. --type RawSpan struct { -- // Those recording the RawSpan should also record the contents of its -- // SpanContext. -- Context SpanContext -- -- // The name of the ""operation"" this span is an instance of. (Called a ""span -- // name"" in some implementations) -- Operation string -- -- // We store rather than so that only -- // one of the timestamps has global clock uncertainty issues. -- Start time.Time -- Duration time.Duration -- -- // Essentially an extension mechanism. Can be used for many purposes, -- // not to be enumerated here. -- Tags opentracing.Tags -- -- // The span's ""microlog"". 
-- Logs []opentracing.LogRecord --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go -deleted file mode 100644 -index 0b8eeb7fc5fdb..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go -+++ /dev/null -@@ -1,60 +0,0 @@ --package zipkintracer -- --import ""sync"" -- --// A SpanRecorder handles all of the `RawSpan` data generated via an --// associated `Tracer` (see `NewStandardTracer`) instance. It also names --// the containing process and provides access to a straightforward tag map. --type SpanRecorder interface { -- // Implementations must determine whether and where to store `span`. -- RecordSpan(span RawSpan) --} -- --// InMemorySpanRecorder is a simple thread-safe implementation of --// SpanRecorder that stores all reported spans in memory, accessible --// via reporter.GetSpans(). It is primarily intended for testing purposes. --type InMemorySpanRecorder struct { -- sync.RWMutex -- spans []RawSpan --} -- --// NewInMemoryRecorder creates new InMemorySpanRecorder --func NewInMemoryRecorder() *InMemorySpanRecorder { -- return new(InMemorySpanRecorder) --} -- --// RecordSpan implements the respective method of SpanRecorder. --func (r *InMemorySpanRecorder) RecordSpan(span RawSpan) { -- r.Lock() -- defer r.Unlock() -- r.spans = append(r.spans, span) --} -- --// GetSpans returns a copy of the array of spans accumulated so far. --func (r *InMemorySpanRecorder) GetSpans() []RawSpan { -- r.RLock() -- defer r.RUnlock() -- spans := make([]RawSpan, len(r.spans)) -- copy(spans, r.spans) -- return spans --} -- --// GetSampledSpans returns a slice of spans accumulated so far which were sampled. --func (r *InMemorySpanRecorder) GetSampledSpans() []RawSpan { -- r.RLock() -- defer r.RUnlock() -- spans := make([]RawSpan, 0, len(r.spans)) -- for _, span := range r.spans { -- if span.Context.Sampled { -- spans = append(spans, span) -- } -- } -- return spans --} -- --// Reset clears the internal array of spans. --func (r *InMemorySpanRecorder) Reset() { -- r.Lock() -- defer r.Unlock() -- r.spans = nil --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go -deleted file mode 100644 -index bb7ff0a53616b..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go -+++ /dev/null -@@ -1,99 +0,0 @@ --package zipkintracer -- --import ( -- ""math"" -- ""math/rand"" -- ""sync"" -- ""time"" --) -- --// Sampler functions return if a Zipkin span should be sampled, based on its --// traceID. --type Sampler func(id uint64) bool -- --func neverSample(_ uint64) bool { return false } -- --func alwaysSample(_ uint64) bool { return true } -- --// ModuloSampler provides a typical OpenTracing type Sampler. --func ModuloSampler(mod uint64) Sampler { -- if mod < 2 { -- return alwaysSample -- } -- return func(id uint64) bool { -- return (id % mod) == 0 -- } --} -- --// NewBoundarySampler is appropriate for high-traffic instrumentation who --// provision random trace ids, and make the sampling decision only once. --// It defends against nodes in the cluster selecting exactly the same ids. 
--func NewBoundarySampler(rate float64, salt int64) Sampler { -- if rate <= 0 { -- return neverSample -- } -- if rate >= 1.0 { -- return alwaysSample -- } -- var ( -- boundary = int64(rate * 10000) -- usalt = uint64(salt) -- ) -- return func(id uint64) bool { -- return int64(math.Abs(float64(id^usalt)))%10000 < boundary -- } --} -- --// NewCountingSampler is appropriate for low-traffic instrumentation or --// those who do not provision random trace ids. It is not appropriate for --// collectors as the sampling decision isn't idempotent (consistent based --// on trace id). --func NewCountingSampler(rate float64) Sampler { -- if rate <= 0 { -- return neverSample -- } -- if rate >= 1.0 { -- return alwaysSample -- } -- var ( -- i = 0 -- outOf100 = int(rate*100 + math.Copysign(0.5, rate*100)) // for rounding float to int conversion instead of truncation -- decisions = randomBitSet(100, outOf100, rand.New(rand.NewSource(time.Now().UnixNano()))) -- mtx = &sync.Mutex{} -- ) -- -- return func(_ uint64) bool { -- mtx.Lock() -- defer mtx.Unlock() -- result := decisions[i] -- i++ -- if i == 100 { -- i = 0 -- } -- return result -- } --} -- --/** -- * Reservoir sampling algorithm borrowed from Stack Overflow. -- * -- * http://stackoverflow.com/questions/12817946/generate-a-random-bitset-with-n-1s -- */ --func randomBitSet(size int, cardinality int, rnd *rand.Rand) []bool { -- result := make([]bool, size) -- chosen := make([]int, cardinality) -- var i int -- for i = 0; i < cardinality; i++ { -- chosen[i] = i -- result[i] = true -- } -- for ; i < size; i++ { -- j := rnd.Intn(i + 1) -- if j < cardinality { -- result[chosen[j]] = false -- result[i] = true -- chosen[j] = i -- } -- } -- return result --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go -deleted file mode 100644 -index 4850a94d059b6..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go -+++ /dev/null -@@ -1,290 +0,0 @@ --package zipkintracer -- --import ( -- ""sync"" -- ""time"" -- -- opentracing ""github.com/opentracing/opentracing-go"" -- ""github.com/opentracing/opentracing-go/ext"" -- ""github.com/opentracing/opentracing-go/log"" -- -- otobserver ""github.com/opentracing-contrib/go-observer"" -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --// Span provides access to the essential details of the span, for use --// by zipkintracer consumers. These methods may only be called prior --// to (*opentracing.Span).Finish(). --type Span interface { -- opentracing.Span -- -- // Operation names the work done by this span instance -- Operation() string -- -- // Start indicates when the span began -- Start() time.Time --} -- --// Implements the `Span` interface. Created via tracerImpl (see --// `zipkintracer.NewTracer()`). --type spanImpl struct { -- tracer *tracerImpl -- event func(SpanEvent) -- observer otobserver.SpanObserver -- sync.Mutex // protects the fields below -- raw RawSpan -- // The number of logs dropped because of MaxLogsPerSpan. -- numDroppedLogs int -- Endpoint *zipkincore.Endpoint --} -- --var spanPool = &sync.Pool{New: func() interface{} { -- return &spanImpl{} --}} -- --func (s *spanImpl) reset() { -- s.tracer, s.event = nil, nil -- // Note: Would like to do the following, but then the consumer of RawSpan -- // (the recorder) needs to make sure that they're not holding on to the -- // baggage or logs when they return (i.e. 
they need to copy if they care): -- // -- // logs, baggage := s.raw.Logs[:0], s.raw.Baggage -- // for k := range baggage { -- // delete(baggage, k) -- // } -- // s.raw.Logs, s.raw.Baggage = logs, baggage -- // -- // That's likely too much to ask for. But there is some magic we should -- // be able to do with `runtime.SetFinalizer` to reclaim that memory into -- // a buffer pool when GC considers them unreachable, which should ease -- // some of the load. Hard to say how quickly that would be in practice -- // though. -- s.raw = RawSpan{ -- Context: SpanContext{}, -- } --} -- --func (s *spanImpl) SetOperationName(operationName string) opentracing.Span { -- if s.observer != nil { -- s.observer.OnSetOperationName(operationName) -- } -- s.Lock() -- defer s.Unlock() -- s.raw.Operation = operationName -- return s --} -- --func (s *spanImpl) trim() bool { -- return !s.raw.Context.Sampled && s.tracer.options.trimUnsampledSpans --} -- --func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span { -- defer s.onTag(key, value) -- if s.observer != nil { -- s.observer.OnSetTag(key, value) -- } -- -- s.Lock() -- defer s.Unlock() -- if key == string(ext.SamplingPriority) { -- if v, ok := value.(uint16); ok { -- s.raw.Context.Sampled = v != 0 -- return s -- } -- } -- if s.trim() { -- return s -- } -- -- if s.raw.Tags == nil { -- s.raw.Tags = opentracing.Tags{} -- } -- s.raw.Tags[key] = value -- return s --} -- --func (s *spanImpl) LogKV(keyValues ...interface{}) { -- fields, err := log.InterleavedKVToFields(keyValues...) -- if err != nil { -- s.LogFields(log.Error(err), log.String(""function"", ""LogKV"")) -- return -- } -- s.LogFields(fields...) --} -- --func (s *spanImpl) appendLog(lr opentracing.LogRecord) { -- maxLogs := s.tracer.options.maxLogsPerSpan -- if maxLogs == 0 || len(s.raw.Logs) < maxLogs { -- s.raw.Logs = append(s.raw.Logs, lr) -- return -- } -- -- // We have too many logs. We don't touch the first numOld logs; we treat the -- // rest as a circular buffer and overwrite the oldest log among those. -- numOld := (maxLogs - 1) / 2 -- numNew := maxLogs - numOld -- s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr -- s.numDroppedLogs++ --} -- --func (s *spanImpl) LogFields(fields ...log.Field) { -- lr := opentracing.LogRecord{ -- Fields: fields, -- } -- defer s.onLogFields(lr) -- s.Lock() -- defer s.Unlock() -- if s.trim() || s.tracer.options.dropAllLogs { -- return -- } -- if lr.Timestamp.IsZero() { -- lr.Timestamp = time.Now() -- } -- s.appendLog(lr) --} -- --func (s *spanImpl) LogEvent(event string) { -- s.Log(opentracing.LogData{ -- Event: event, -- }) --} -- --func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) { -- s.Log(opentracing.LogData{ -- Event: event, -- Payload: payload, -- }) --} -- --func (s *spanImpl) Log(ld opentracing.LogData) { -- defer s.onLog(ld) -- s.Lock() -- defer s.Unlock() -- if s.trim() || s.tracer.options.dropAllLogs { -- return -- } -- -- if ld.Timestamp.IsZero() { -- ld.Timestamp = time.Now() -- } -- -- s.appendLog(ld.ToLogRecord()) --} -- --func (s *spanImpl) Finish() { -- s.FinishWithOptions(opentracing.FinishOptions{}) --} -- --// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at --// the end (i.e. pos circular left shifts). 
--func rotateLogBuffer(buf []opentracing.LogRecord, pos int) { -- // This algorithm is described in: -- // http://www.cplusplus.com/reference/algorithm/rotate -- for first, middle, next := 0, pos, pos; first != middle; { -- buf[first], buf[next] = buf[next], buf[first] -- first++ -- next++ -- if next == len(buf) { -- next = middle -- } else if first == middle { -- middle = next -- } -- } --} -- --func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) { -- finishTime := opts.FinishTime -- if finishTime.IsZero() { -- finishTime = time.Now() -- } -- duration := finishTime.Sub(s.raw.Start) -- -- if s.observer != nil { -- s.observer.OnFinish(opts) -- } -- -- s.Lock() -- defer s.Unlock() -- -- for _, lr := range opts.LogRecords { -- s.appendLog(lr) -- } -- for _, ld := range opts.BulkLogData { -- s.appendLog(ld.ToLogRecord()) -- } -- -- if s.numDroppedLogs > 0 { -- // We dropped some log events, which means that we used part of Logs as a -- // circular buffer (see appendLog). De-circularize it. -- numOld := (len(s.raw.Logs) - 1) / 2 -- numNew := len(s.raw.Logs) - numOld -- rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew) -- -- // Replace the log in the middle (the oldest ""new"" log) with information -- // about the dropped logs. This means that we are effectively dropping one -- // more ""new"" log. -- numDropped := s.numDroppedLogs + 1 -- s.raw.Logs[numOld] = opentracing.LogRecord{ -- // Keep the timestamp of the last dropped event. -- Timestamp: s.raw.Logs[numOld].Timestamp, -- Fields: []log.Field{ -- log.String(""event"", ""dropped Span logs""), -- log.Int(""dropped_log_count"", numDropped), -- log.String(""component"", ""zipkintracer""), -- }, -- } -- } -- -- s.raw.Duration = duration -- -- s.onFinish(s.raw) -- s.tracer.options.recorder.RecordSpan(s.raw) -- -- // Last chance to get options before the span is possibly reset. -- poolEnabled := s.tracer.options.enableSpanPool -- if s.tracer.options.debugAssertUseAfterFinish { -- // This makes it much more likely to catch a panic on any subsequent -- // operation since s.tracer is accessed on every call to `Lock`. -- // We don't call `reset()` here to preserve the logs in the Span -- // which are printed when the assertion triggers. 
-- s.tracer = nil -- } -- -- if poolEnabled { -- spanPool.Put(s) -- } --} -- --func (s *spanImpl) Tracer() opentracing.Tracer { -- return s.tracer --} -- --func (s *spanImpl) Context() opentracing.SpanContext { -- return s.raw.Context --} -- --func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span { -- s.onBaggage(key, val) -- if s.trim() { -- return s -- } -- -- s.Lock() -- defer s.Unlock() -- s.raw.Context = s.raw.Context.WithBaggageItem(key, val) -- return s --} -- --func (s *spanImpl) BaggageItem(key string) string { -- s.Lock() -- defer s.Unlock() -- return s.raw.Context.Baggage[key] --} -- --func (s *spanImpl) Operation() string { -- return s.raw.Operation --} -- --func (s *spanImpl) Start() time.Time { -- return s.raw.Start --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go -deleted file mode 100644 -index 9b51d874a644d..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go -+++ /dev/null -@@ -1,7 +0,0 @@ --// Autogenerated by Thrift Compiler (1.0.0-dev) --// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -- --package scribe -- --var GoUnusedProtection__ int; -- -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go -deleted file mode 100644 -index be5fbae02e849..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go -+++ /dev/null -@@ -1,24 +0,0 @@ --// Autogenerated by Thrift Compiler (1.0.0-dev) --// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -- --package scribe -- --import ( -- ""bytes"" -- ""reflect"" -- ""context"" -- ""fmt"" -- ""github.com/apache/thrift/lib/go/thrift"" --) -- --// (needed to ensure safety because of naive import list construction.) --var _ = thrift.ZERO --var _ = fmt.Printf --var _ = context.Background --var _ = reflect.DeepEqual --var _ = bytes.Equal -- -- --func init() { --} -- -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go -deleted file mode 100644 -index 3767688b6d3a0..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go -+++ /dev/null -@@ -1,550 +0,0 @@ --// Autogenerated by Thrift Compiler (1.0.0-dev) --// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -- --package scribe -- --import ( -- ""bytes"" -- ""reflect"" -- ""database/sql/driver"" -- ""errors"" -- ""context"" -- ""fmt"" -- ""github.com/apache/thrift/lib/go/thrift"" --) -- --// (needed to ensure safety because of naive import list construction.) 
--var _ = thrift.ZERO --var _ = fmt.Printf --var _ = context.Background --var _ = reflect.DeepEqual --var _ = bytes.Equal -- --type ResultCode int64 --const ( -- ResultCode_OK ResultCode = 0 -- ResultCode_TRY_LATER ResultCode = 1 --) -- --func (p ResultCode) String() string { -- switch p { -- case ResultCode_OK: return ""OK"" -- case ResultCode_TRY_LATER: return ""TRY_LATER"" -- } -- return """" --} -- --func ResultCodeFromString(s string) (ResultCode, error) { -- switch s { -- case ""OK"": return ResultCode_OK, nil -- case ""TRY_LATER"": return ResultCode_TRY_LATER, nil -- } -- return ResultCode(0), fmt.Errorf(""not a valid ResultCode string"") --} -- -- --func ResultCodePtr(v ResultCode) *ResultCode { return &v } -- --func (p ResultCode) MarshalText() ([]byte, error) { --return []byte(p.String()), nil --} -- --func (p *ResultCode) UnmarshalText(text []byte) error { --q, err := ResultCodeFromString(string(text)) --if (err != nil) { --return err --} --*p = q --return nil --} -- --func (p *ResultCode) Scan(value interface{}) error { --v, ok := value.(int64) --if !ok { --return errors.New(""Scan value is not int64"") --} --*p = ResultCode(v) --return nil --} -- --func (p * ResultCode) Value() (driver.Value, error) { -- if p == nil { -- return nil, nil -- } --return int64(*p), nil --} --// Attributes: --// - Category --// - Message --type LogEntry struct { -- Category string `thrift:""category,1"" db:""category"" json:""category""` -- Message string `thrift:""message,2"" db:""message"" json:""message""` --} -- --func NewLogEntry() *LogEntry { -- return &LogEntry{} --} -- -- --func (p *LogEntry) GetCategory() string { -- return p.Category --} -- --func (p *LogEntry) GetMessage() string { -- return p.Message --} --func (p *LogEntry) Read(iprot thrift.TProtocol) error { -- if _, err := iprot.ReadStructBegin(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read error: "", p), err) -- } -- -- -- for { -- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() -- if err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T field %d read error: "", p, fieldId), err) -- } -- if fieldTypeId == thrift.STOP { break; } -- switch fieldId { -- case 1: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField1(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 2: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField2(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- default: -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- if err := iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read struct end error: "", p), err) -- } -- return nil --} -- --func (p *LogEntry) ReadField1(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadString(); err != nil { -- return thrift.PrependError(""error reading field 1: "", err) --} else { -- p.Category = v --} -- return nil --} -- --func (p *LogEntry) ReadField2(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadString(); err != nil { -- return thrift.PrependError(""error reading field 2: "", err) --} else { -- p.Message = v --} -- return nil --} -- --func (p *LogEntry) Write(oprot thrift.TProtocol) error { -- if err := oprot.WriteStructBegin(""LogEntry""); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write struct begin error: 
"", p), err) } -- if p != nil { -- if err := p.writeField1(oprot); err != nil { return err } -- if err := p.writeField2(oprot); err != nil { return err } -- } -- if err := oprot.WriteFieldStop(); err != nil { -- return thrift.PrependError(""write field stop error: "", err) } -- if err := oprot.WriteStructEnd(); err != nil { -- return thrift.PrependError(""write struct stop error: "", err) } -- return nil --} -- --func (p *LogEntry) writeField1(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""category"", thrift.STRING, 1); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 1:category: "", p), err) } -- if err := oprot.WriteString(string(p.Category)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.category (1) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 1:category: "", p), err) } -- return err --} -- --func (p *LogEntry) writeField2(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""message"", thrift.STRING, 2); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 2:message: "", p), err) } -- if err := oprot.WriteString(string(p.Message)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.message (2) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 2:message: "", p), err) } -- return err --} -- --func (p *LogEntry) String() string { -- if p == nil { -- return """" -- } -- return fmt.Sprintf(""LogEntry(%+v)"", *p) --} -- --type Scribe interface { -- // Parameters: -- // - Messages -- Log(ctx context.Context, messages []*LogEntry) (r ResultCode, err error) --} -- --type ScribeClient struct { -- c thrift.TClient --} -- --// Deprecated: Use NewScribe instead --func NewScribeClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ScribeClient { -- return &ScribeClient{ -- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), -- } --} -- --// Deprecated: Use NewScribe instead --func NewScribeClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ScribeClient { -- return &ScribeClient{ -- c: thrift.NewTStandardClient(iprot, oprot), -- } --} -- --func NewScribeClient(c thrift.TClient) *ScribeClient { -- return &ScribeClient{ -- c: c, -- } --} -- --// Parameters: --// - Messages --func (p *ScribeClient) Log(ctx context.Context, messages []*LogEntry) (r ResultCode, err error) { -- var _args0 ScribeLogArgs -- _args0.Messages = messages -- var _result1 ScribeLogResult -- if err = p.c.Call(ctx, ""Log"", &_args0, &_result1); err != nil { -- return -- } -- return _result1.GetSuccess(), nil --} -- --type ScribeProcessor struct { -- processorMap map[string]thrift.TProcessorFunction -- handler Scribe --} -- --func (p *ScribeProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { -- p.processorMap[key] = processor --} -- --func (p *ScribeProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { -- processor, ok = p.processorMap[key] -- return processor, ok --} -- --func (p *ScribeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { -- return p.processorMap --} -- --func NewScribeProcessor(handler Scribe) *ScribeProcessor { -- -- self2 := &ScribeProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} -- 
self2.processorMap[""Log""] = &scribeProcessorLog{handler:handler} --return self2 --} -- --func (p *ScribeProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { -- name, _, seqId, err := iprot.ReadMessageBegin() -- if err != nil { return false, err } -- if processor, ok := p.GetProcessorFunction(name); ok { -- return processor.Process(ctx, seqId, iprot, oprot) -- } -- iprot.Skip(thrift.STRUCT) -- iprot.ReadMessageEnd() -- x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, ""Unknown function "" + name) -- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) -- x3.Write(oprot) -- oprot.WriteMessageEnd() -- oprot.Flush() -- return false, x3 -- --} -- --type scribeProcessorLog struct { -- handler Scribe --} -- --func (p *scribeProcessorLog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { -- args := ScribeLogArgs{} -- if err = args.Read(iprot); err != nil { -- iprot.ReadMessageEnd() -- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) -- oprot.WriteMessageBegin(""Log"", thrift.EXCEPTION, seqId) -- x.Write(oprot) -- oprot.WriteMessageEnd() -- oprot.Flush() -- return false, err -- } -- -- iprot.ReadMessageEnd() -- result := ScribeLogResult{} --var retval ResultCode -- var err2 error -- if retval, err2 = p.handler.Log(ctx, args.Messages); err2 != nil { -- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, ""Internal error processing Log: "" + err2.Error()) -- oprot.WriteMessageBegin(""Log"", thrift.EXCEPTION, seqId) -- x.Write(oprot) -- oprot.WriteMessageEnd() -- oprot.Flush() -- return true, err2 -- } else { -- result.Success = &retval --} -- if err2 = oprot.WriteMessageBegin(""Log"", thrift.REPLY, seqId); err2 != nil { -- err = err2 -- } -- if err2 = result.Write(oprot); err == nil && err2 != nil { -- err = err2 -- } -- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { -- err = err2 -- } -- if err2 = oprot.Flush(); err == nil && err2 != nil { -- err = err2 -- } -- if err != nil { -- return -- } -- return true, err --} -- -- --// HELPER FUNCTIONS AND STRUCTURES -- --// Attributes: --// - Messages --type ScribeLogArgs struct { -- Messages []*LogEntry `thrift:""messages,1"" db:""messages"" json:""messages""` --} -- --func NewScribeLogArgs() *ScribeLogArgs { -- return &ScribeLogArgs{} --} -- -- --func (p *ScribeLogArgs) GetMessages() []*LogEntry { -- return p.Messages --} --func (p *ScribeLogArgs) Read(iprot thrift.TProtocol) error { -- if _, err := iprot.ReadStructBegin(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read error: "", p), err) -- } -- -- -- for { -- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() -- if err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T field %d read error: "", p, fieldId), err) -- } -- if fieldTypeId == thrift.STOP { break; } -- switch fieldId { -- case 1: -- if fieldTypeId == thrift.LIST { -- if err := p.ReadField1(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- default: -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- if err := iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read struct end error: "", p), err) -- } -- return nil --} -- --func (p *ScribeLogArgs) ReadField1(iprot thrift.TProtocol) error { -- _, size, err := iprot.ReadListBegin() -- 
if err != nil { -- return thrift.PrependError(""error reading list begin: "", err) -- } -- tSlice := make([]*LogEntry, 0, size) -- p.Messages = tSlice -- for i := 0; i < size; i ++ { -- _elem4 := &LogEntry{} -- if err := _elem4.Read(iprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error reading struct: "", _elem4), err) -- } -- p.Messages = append(p.Messages, _elem4) -- } -- if err := iprot.ReadListEnd(); err != nil { -- return thrift.PrependError(""error reading list end: "", err) -- } -- return nil --} -- --func (p *ScribeLogArgs) Write(oprot thrift.TProtocol) error { -- if err := oprot.WriteStructBegin(""Log_args""); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write struct begin error: "", p), err) } -- if p != nil { -- if err := p.writeField1(oprot); err != nil { return err } -- } -- if err := oprot.WriteFieldStop(); err != nil { -- return thrift.PrependError(""write field stop error: "", err) } -- if err := oprot.WriteStructEnd(); err != nil { -- return thrift.PrependError(""write struct stop error: "", err) } -- return nil --} -- --func (p *ScribeLogArgs) writeField1(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""messages"", thrift.LIST, 1); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 1:messages: "", p), err) } -- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Messages)); err != nil { -- return thrift.PrependError(""error writing list begin: "", err) -- } -- for _, v := range p.Messages { -- if err := v.Write(oprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error writing struct: "", v), err) -- } -- } -- if err := oprot.WriteListEnd(); err != nil { -- return thrift.PrependError(""error writing list end: "", err) -- } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 1:messages: "", p), err) } -- return err --} -- --func (p *ScribeLogArgs) String() string { -- if p == nil { -- return """" -- } -- return fmt.Sprintf(""ScribeLogArgs(%+v)"", *p) --} -- --// Attributes: --// - Success --type ScribeLogResult struct { -- Success *ResultCode `thrift:""success,0"" db:""success"" json:""success,omitempty""` --} -- --func NewScribeLogResult() *ScribeLogResult { -- return &ScribeLogResult{} --} -- --var ScribeLogResult_Success_DEFAULT ResultCode --func (p *ScribeLogResult) GetSuccess() ResultCode { -- if !p.IsSetSuccess() { -- return ScribeLogResult_Success_DEFAULT -- } --return *p.Success --} --func (p *ScribeLogResult) IsSetSuccess() bool { -- return p.Success != nil --} -- --func (p *ScribeLogResult) Read(iprot thrift.TProtocol) error { -- if _, err := iprot.ReadStructBegin(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read error: "", p), err) -- } -- -- -- for { -- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() -- if err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T field %d read error: "", p, fieldId), err) -- } -- if fieldTypeId == thrift.STOP { break; } -- switch fieldId { -- case 0: -- if fieldTypeId == thrift.I32 { -- if err := p.ReadField0(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- default: -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- if err := iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read struct end error: "", p), err) -- } -- 
return nil --} -- --func (p *ScribeLogResult) ReadField0(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI32(); err != nil { -- return thrift.PrependError(""error reading field 0: "", err) --} else { -- temp := ResultCode(v) -- p.Success = &temp --} -- return nil --} -- --func (p *ScribeLogResult) Write(oprot thrift.TProtocol) error { -- if err := oprot.WriteStructBegin(""Log_result""); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write struct begin error: "", p), err) } -- if p != nil { -- if err := p.writeField0(oprot); err != nil { return err } -- } -- if err := oprot.WriteFieldStop(); err != nil { -- return thrift.PrependError(""write field stop error: "", err) } -- if err := oprot.WriteStructEnd(); err != nil { -- return thrift.PrependError(""write struct stop error: "", err) } -- return nil --} -- --func (p *ScribeLogResult) writeField0(oprot thrift.TProtocol) (err error) { -- if p.IsSetSuccess() { -- if err := oprot.WriteFieldBegin(""success"", thrift.I32, 0); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 0:success: "", p), err) } -- if err := oprot.WriteI32(int32(*p.Success)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.success (0) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 0:success: "", p), err) } -- } -- return err --} -- --func (p *ScribeLogResult) String() string { -- if p == nil { -- return """" -- } -- return fmt.Sprintf(""ScribeLogResult(%+v)"", *p) --} -- -- -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go -deleted file mode 100644 -index 2d5ebe7f43748..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go -+++ /dev/null -@@ -1,7 +0,0 @@ --// Autogenerated by Thrift Compiler (1.0.0-dev) --// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -- --package zipkincore -- --var GoUnusedProtection__ int; -- -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go -deleted file mode 100644 -index 6a662eae6d47b..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go -+++ /dev/null -@@ -1,45 +0,0 @@ --// Autogenerated by Thrift Compiler (1.0.0-dev) --// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -- --package zipkincore -- --import ( -- ""bytes"" -- ""reflect"" -- ""context"" -- ""fmt"" -- ""github.com/apache/thrift/lib/go/thrift"" --) -- --// (needed to ensure safety because of naive import list construction.) 
--var _ = thrift.ZERO --var _ = fmt.Printf --var _ = context.Background --var _ = reflect.DeepEqual --var _ = bytes.Equal -- --const CLIENT_SEND = ""cs"" --const CLIENT_RECV = ""cr"" --const SERVER_SEND = ""ss"" --const SERVER_RECV = ""sr"" --const WIRE_SEND = ""ws"" --const WIRE_RECV = ""wr"" --const CLIENT_SEND_FRAGMENT = ""csf"" --const CLIENT_RECV_FRAGMENT = ""crf"" --const SERVER_SEND_FRAGMENT = ""ssf"" --const SERVER_RECV_FRAGMENT = ""srf"" --const HTTP_HOST = ""http.host"" --const HTTP_METHOD = ""http.method"" --const HTTP_PATH = ""http.path"" --const HTTP_URL = ""http.url"" --const HTTP_STATUS_CODE = ""http.status_code"" --const HTTP_REQUEST_SIZE = ""http.request.size"" --const HTTP_RESPONSE_SIZE = ""http.response.size"" --const LOCAL_COMPONENT = ""lc"" --const ERROR = ""error"" --const CLIENT_ADDR = ""ca"" --const SERVER_ADDR = ""sa"" -- --func init() { --} -- -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go -deleted file mode 100644 -index 42f048a3f0e80..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go -+++ /dev/null -@@ -1,1285 +0,0 @@ --// Autogenerated by Thrift Compiler (1.0.0-dev) --// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -- --package zipkincore -- --import ( -- ""bytes"" -- ""reflect"" -- ""database/sql/driver"" -- ""errors"" -- ""context"" -- ""fmt"" -- ""github.com/apache/thrift/lib/go/thrift"" --) -- --// (needed to ensure safety because of naive import list construction.) --var _ = thrift.ZERO --var _ = fmt.Printf --var _ = context.Background --var _ = reflect.DeepEqual --var _ = bytes.Equal -- --//A subset of thrift base types, except BYTES. 
--type AnnotationType int64 --const ( -- AnnotationType_BOOL AnnotationType = 0 -- AnnotationType_BYTES AnnotationType = 1 -- AnnotationType_I16 AnnotationType = 2 -- AnnotationType_I32 AnnotationType = 3 -- AnnotationType_I64 AnnotationType = 4 -- AnnotationType_DOUBLE AnnotationType = 5 -- AnnotationType_STRING AnnotationType = 6 --) -- --func (p AnnotationType) String() string { -- switch p { -- case AnnotationType_BOOL: return ""BOOL"" -- case AnnotationType_BYTES: return ""BYTES"" -- case AnnotationType_I16: return ""I16"" -- case AnnotationType_I32: return ""I32"" -- case AnnotationType_I64: return ""I64"" -- case AnnotationType_DOUBLE: return ""DOUBLE"" -- case AnnotationType_STRING: return ""STRING"" -- } -- return """" --} -- --func AnnotationTypeFromString(s string) (AnnotationType, error) { -- switch s { -- case ""BOOL"": return AnnotationType_BOOL, nil -- case ""BYTES"": return AnnotationType_BYTES, nil -- case ""I16"": return AnnotationType_I16, nil -- case ""I32"": return AnnotationType_I32, nil -- case ""I64"": return AnnotationType_I64, nil -- case ""DOUBLE"": return AnnotationType_DOUBLE, nil -- case ""STRING"": return AnnotationType_STRING, nil -- } -- return AnnotationType(0), fmt.Errorf(""not a valid AnnotationType string"") --} -- -- --func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } -- --func (p AnnotationType) MarshalText() ([]byte, error) { --return []byte(p.String()), nil --} -- --func (p *AnnotationType) UnmarshalText(text []byte) error { --q, err := AnnotationTypeFromString(string(text)) --if (err != nil) { --return err --} --*p = q --return nil --} -- --func (p *AnnotationType) Scan(value interface{}) error { --v, ok := value.(int64) --if !ok { --return errors.New(""Scan value is not int64"") --} --*p = AnnotationType(v) --return nil --} -- --func (p * AnnotationType) Value() (driver.Value, error) { -- if p == nil { -- return nil, nil -- } --return int64(*p), nil --} --// Indicates the network context of a service recording an annotation with two --// exceptions. --// --// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, --// the endpoint indicates the source or destination of an RPC. This exception --// allows zipkin to display network context of uninstrumented services, or --// clients such as web browsers. --// --// Attributes: --// - Ipv4: IPv4 host address packed into 4 bytes. --// --// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 --// - Port: IPv4 port or 0, if unknown. --// --// Note: this is to be treated as an unsigned integer, so watch for negatives. --// - ServiceName: Classifier of a source or destination in lowercase, such as ""zipkin-web"". --// --// This is the primary parameter for trace lookup, so should be intuitive as --// possible, for example, matching names in service discovery. --// --// Conventionally, when the service name isn't known, service_name = ""unknown"". --// However, it is also permissible to set service_name = """" (empty string). --// The difference in the latter usage is that the span will not be queryable --// by service name unless more information is added to the span with non-empty --// service name, e.g. an additional annotation from the server. --// --// Particularly clients may not have a reliable service name at ingest. One --// approach is to set service_name to """" at ingest, and later assign a --// better label based on binary annotations, such as user agent. --// - Ipv6: IPv6 host address packed into 16 bytes. 
Ex Inet6Address.getBytes() --type Endpoint struct { -- Ipv4 int32 `thrift:""ipv4,1"" db:""ipv4"" json:""ipv4""` -- Port int16 `thrift:""port,2"" db:""port"" json:""port""` -- ServiceName string `thrift:""service_name,3"" db:""service_name"" json:""service_name""` -- Ipv6 []byte `thrift:""ipv6,4"" db:""ipv6"" json:""ipv6,omitempty""` --} -- --func NewEndpoint() *Endpoint { -- return &Endpoint{} --} -- -- --func (p *Endpoint) GetIpv4() int32 { -- return p.Ipv4 --} -- --func (p *Endpoint) GetPort() int16 { -- return p.Port --} -- --func (p *Endpoint) GetServiceName() string { -- return p.ServiceName --} --var Endpoint_Ipv6_DEFAULT []byte -- --func (p *Endpoint) GetIpv6() []byte { -- return p.Ipv6 --} --func (p *Endpoint) IsSetIpv6() bool { -- return p.Ipv6 != nil --} -- --func (p *Endpoint) Read(iprot thrift.TProtocol) error { -- if _, err := iprot.ReadStructBegin(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read error: "", p), err) -- } -- -- -- for { -- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() -- if err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T field %d read error: "", p, fieldId), err) -- } -- if fieldTypeId == thrift.STOP { break; } -- switch fieldId { -- case 1: -- if fieldTypeId == thrift.I32 { -- if err := p.ReadField1(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 2: -- if fieldTypeId == thrift.I16 { -- if err := p.ReadField2(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 3: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField3(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 4: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField4(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- default: -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- if err := iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read struct end error: "", p), err) -- } -- return nil --} -- --func (p *Endpoint) ReadField1(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI32(); err != nil { -- return thrift.PrependError(""error reading field 1: "", err) --} else { -- p.Ipv4 = v --} -- return nil --} -- --func (p *Endpoint) ReadField2(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI16(); err != nil { -- return thrift.PrependError(""error reading field 2: "", err) --} else { -- p.Port = v --} -- return nil --} -- --func (p *Endpoint) ReadField3(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadString(); err != nil { -- return thrift.PrependError(""error reading field 3: "", err) --} else { -- p.ServiceName = v --} -- return nil --} -- --func (p *Endpoint) ReadField4(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadBinary(); err != nil { -- return thrift.PrependError(""error reading field 4: "", err) --} else { -- p.Ipv6 = v --} -- return nil --} -- --func (p *Endpoint) Write(oprot thrift.TProtocol) error { -- if err := oprot.WriteStructBegin(""Endpoint""); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write struct begin error: "", p), err) } -- if p != nil { -- if err := p.writeField1(oprot); err != nil { return err } -- if err := 
p.writeField2(oprot); err != nil { return err } -- if err := p.writeField3(oprot); err != nil { return err } -- if err := p.writeField4(oprot); err != nil { return err } -- } -- if err := oprot.WriteFieldStop(); err != nil { -- return thrift.PrependError(""write field stop error: "", err) } -- if err := oprot.WriteStructEnd(); err != nil { -- return thrift.PrependError(""write struct stop error: "", err) } -- return nil --} -- --func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""ipv4"", thrift.I32, 1); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 1:ipv4: "", p), err) } -- if err := oprot.WriteI32(int32(p.Ipv4)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.ipv4 (1) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 1:ipv4: "", p), err) } -- return err --} -- --func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""port"", thrift.I16, 2); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 2:port: "", p), err) } -- if err := oprot.WriteI16(int16(p.Port)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.port (2) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 2:port: "", p), err) } -- return err --} -- --func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""service_name"", thrift.STRING, 3); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 3:service_name: "", p), err) } -- if err := oprot.WriteString(string(p.ServiceName)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.service_name (3) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 3:service_name: "", p), err) } -- return err --} -- --func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) { -- if p.IsSetIpv6() { -- if err := oprot.WriteFieldBegin(""ipv6"", thrift.STRING, 4); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 4:ipv6: "", p), err) } -- if err := oprot.WriteBinary(p.Ipv6); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.ipv6 (4) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 4:ipv6: "", p), err) } -- } -- return err --} -- --func (p *Endpoint) String() string { -- if p == nil { -- return """" -- } -- return fmt.Sprintf(""Endpoint(%+v)"", *p) --} -- --// Associates an event that explains latency with a timestamp. --// --// Unlike log statements, annotations are often codes: for example ""sr"". --// --// Attributes: --// - Timestamp: Microseconds from epoch. --// --// This value should use the most precise value possible. For example, --// gettimeofday or multiplying currentTimeMillis by 1000. --// - Value: Usually a short tag indicating an event, like ""sr"" or ""finagle.retry"". --// - Host: The host that recorded the value, primarily for query by service name. 
--type Annotation struct { -- Timestamp int64 `thrift:""timestamp,1"" db:""timestamp"" json:""timestamp""` -- Value string `thrift:""value,2"" db:""value"" json:""value""` -- Host *Endpoint `thrift:""host,3"" db:""host"" json:""host,omitempty""` --} -- --func NewAnnotation() *Annotation { -- return &Annotation{} --} -- -- --func (p *Annotation) GetTimestamp() int64 { -- return p.Timestamp --} -- --func (p *Annotation) GetValue() string { -- return p.Value --} --var Annotation_Host_DEFAULT *Endpoint --func (p *Annotation) GetHost() *Endpoint { -- if !p.IsSetHost() { -- return Annotation_Host_DEFAULT -- } --return p.Host --} --func (p *Annotation) IsSetHost() bool { -- return p.Host != nil --} -- --func (p *Annotation) Read(iprot thrift.TProtocol) error { -- if _, err := iprot.ReadStructBegin(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read error: "", p), err) -- } -- -- -- for { -- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() -- if err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T field %d read error: "", p, fieldId), err) -- } -- if fieldTypeId == thrift.STOP { break; } -- switch fieldId { -- case 1: -- if fieldTypeId == thrift.I64 { -- if err := p.ReadField1(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 2: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField2(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 3: -- if fieldTypeId == thrift.STRUCT { -- if err := p.ReadField3(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- default: -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- if err := iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read struct end error: "", p), err) -- } -- return nil --} -- --func (p *Annotation) ReadField1(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI64(); err != nil { -- return thrift.PrependError(""error reading field 1: "", err) --} else { -- p.Timestamp = v --} -- return nil --} -- --func (p *Annotation) ReadField2(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadString(); err != nil { -- return thrift.PrependError(""error reading field 2: "", err) --} else { -- p.Value = v --} -- return nil --} -- --func (p *Annotation) ReadField3(iprot thrift.TProtocol) error { -- p.Host = &Endpoint{} -- if err := p.Host.Read(iprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error reading struct: "", p.Host), err) -- } -- return nil --} -- --func (p *Annotation) Write(oprot thrift.TProtocol) error { -- if err := oprot.WriteStructBegin(""Annotation""); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write struct begin error: "", p), err) } -- if p != nil { -- if err := p.writeField1(oprot); err != nil { return err } -- if err := p.writeField2(oprot); err != nil { return err } -- if err := p.writeField3(oprot); err != nil { return err } -- } -- if err := oprot.WriteFieldStop(); err != nil { -- return thrift.PrependError(""write field stop error: "", err) } -- if err := oprot.WriteStructEnd(); err != nil { -- return thrift.PrependError(""write struct stop error: "", err) } -- return nil --} -- --func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) { -- if err := 
oprot.WriteFieldBegin(""timestamp"", thrift.I64, 1); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 1:timestamp: "", p), err) } -- if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.timestamp (1) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 1:timestamp: "", p), err) } -- return err --} -- --func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""value"", thrift.STRING, 2); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 2:value: "", p), err) } -- if err := oprot.WriteString(string(p.Value)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.value (2) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 2:value: "", p), err) } -- return err --} -- --func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) { -- if p.IsSetHost() { -- if err := oprot.WriteFieldBegin(""host"", thrift.STRUCT, 3); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 3:host: "", p), err) } -- if err := p.Host.Write(oprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error writing struct: "", p.Host), err) -- } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 3:host: "", p), err) } -- } -- return err --} -- --func (p *Annotation) String() string { -- if p == nil { -- return """" -- } -- return fmt.Sprintf(""Annotation(%+v)"", *p) --} -- --// Binary annotations are tags applied to a Span to give it context. For --// example, a binary annotation of HTTP_PATH (""http.path"") could the path --// to a resource in a RPC call. --// --// Binary annotations of type STRING are always queryable, though more a --// historical implementation detail than a structural concern. --// --// Binary annotations can repeat, and vary on the host. Similar to Annotation, --// the host indicates who logged the event. This allows you to tell the --// difference between the client and server side of the same key. For example, --// the key ""http.path"" might be different on the client and server side due to --// rewriting, like ""/api/v1/myresource"" vs ""/myresource. Via the host field, --// you can see the different points of view, which often help in debugging. --// --// Attributes: --// - Key: Name used to lookup spans, such as ""http.path"" or ""finagle.version"". --// - Value: Serialized thrift bytes, in TBinaryProtocol format. --// --// For legacy reasons, byte order is big-endian. See THRIFT-3217. --// - AnnotationType: The thrift type of value, most often STRING. --// --// annotation_type shouldn't vary for the same key. --// - Host: The host that recorded value, allowing query by service name or address. --// --// There are two exceptions: when key is ""ca"" or ""sa"", this is the source or --// destination of an RPC. This exception allows zipkin to display network --// context of uninstrumented services, such as browsers or databases. 
--type BinaryAnnotation struct { -- Key string `thrift:""key,1"" db:""key"" json:""key""` -- Value []byte `thrift:""value,2"" db:""value"" json:""value""` -- AnnotationType AnnotationType `thrift:""annotation_type,3"" db:""annotation_type"" json:""annotation_type""` -- Host *Endpoint `thrift:""host,4"" db:""host"" json:""host,omitempty""` --} -- --func NewBinaryAnnotation() *BinaryAnnotation { -- return &BinaryAnnotation{} --} -- -- --func (p *BinaryAnnotation) GetKey() string { -- return p.Key --} -- --func (p *BinaryAnnotation) GetValue() []byte { -- return p.Value --} -- --func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { -- return p.AnnotationType --} --var BinaryAnnotation_Host_DEFAULT *Endpoint --func (p *BinaryAnnotation) GetHost() *Endpoint { -- if !p.IsSetHost() { -- return BinaryAnnotation_Host_DEFAULT -- } --return p.Host --} --func (p *BinaryAnnotation) IsSetHost() bool { -- return p.Host != nil --} -- --func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error { -- if _, err := iprot.ReadStructBegin(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read error: "", p), err) -- } -- -- -- for { -- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() -- if err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T field %d read error: "", p, fieldId), err) -- } -- if fieldTypeId == thrift.STOP { break; } -- switch fieldId { -- case 1: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField1(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 2: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField2(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 3: -- if fieldTypeId == thrift.I32 { -- if err := p.ReadField3(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 4: -- if fieldTypeId == thrift.STRUCT { -- if err := p.ReadField4(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- default: -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- if err := iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read struct end error: "", p), err) -- } -- return nil --} -- --func (p *BinaryAnnotation) ReadField1(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadString(); err != nil { -- return thrift.PrependError(""error reading field 1: "", err) --} else { -- p.Key = v --} -- return nil --} -- --func (p *BinaryAnnotation) ReadField2(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadBinary(); err != nil { -- return thrift.PrependError(""error reading field 2: "", err) --} else { -- p.Value = v --} -- return nil --} -- --func (p *BinaryAnnotation) ReadField3(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI32(); err != nil { -- return thrift.PrependError(""error reading field 3: "", err) --} else { -- temp := AnnotationType(v) -- p.AnnotationType = temp --} -- return nil --} -- --func (p *BinaryAnnotation) ReadField4(iprot thrift.TProtocol) error { -- p.Host = &Endpoint{} -- if err := p.Host.Read(iprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error reading struct: "", p.Host), err) -- } -- return nil --} -- --func (p *BinaryAnnotation) Write(oprot 
thrift.TProtocol) error { -- if err := oprot.WriteStructBegin(""BinaryAnnotation""); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write struct begin error: "", p), err) } -- if p != nil { -- if err := p.writeField1(oprot); err != nil { return err } -- if err := p.writeField2(oprot); err != nil { return err } -- if err := p.writeField3(oprot); err != nil { return err } -- if err := p.writeField4(oprot); err != nil { return err } -- } -- if err := oprot.WriteFieldStop(); err != nil { -- return thrift.PrependError(""write field stop error: "", err) } -- if err := oprot.WriteStructEnd(); err != nil { -- return thrift.PrependError(""write struct stop error: "", err) } -- return nil --} -- --func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""key"", thrift.STRING, 1); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 1:key: "", p), err) } -- if err := oprot.WriteString(string(p.Key)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.key (1) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 1:key: "", p), err) } -- return err --} -- --func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""value"", thrift.STRING, 2); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 2:value: "", p), err) } -- if err := oprot.WriteBinary(p.Value); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.value (2) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 2:value: "", p), err) } -- return err --} -- --func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""annotation_type"", thrift.I32, 3); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 3:annotation_type: "", p), err) } -- if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.annotation_type (3) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 3:annotation_type: "", p), err) } -- return err --} -- --func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) { -- if p.IsSetHost() { -- if err := oprot.WriteFieldBegin(""host"", thrift.STRUCT, 4); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 4:host: "", p), err) } -- if err := p.Host.Write(oprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error writing struct: "", p.Host), err) -- } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 4:host: "", p), err) } -- } -- return err --} -- --func (p *BinaryAnnotation) String() string { -- if p == nil { -- return """" -- } -- return fmt.Sprintf(""BinaryAnnotation(%+v)"", *p) --} -- --// A trace is a series of spans (often RPC calls) which form a latency tree. --// --// Spans are usually created by instrumentation in RPC clients or servers, but --// can also represent in-process activity. Annotations in spans are similar to --// log statements, and are sometimes created directly by application developers --// to indicate events of interest, such as a cache miss. 
--// --// The root span is where parent_id = Nil; it usually has the longest duration --// in the trace. --// --// Span identifiers are packed into i64s, but should be treated opaquely. --// String encoding is fixed-width lower-hex, to avoid signed interpretation. --// --// Attributes: --// - TraceID: Unique 8-byte identifier for a trace, set on all spans within it. --// - Name: Span name in lowercase, rpc method for example. Conventionally, when the --// span name isn't known, name = ""unknown"". --// - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely --// identified in storage by (trace_id, id). --// - ParentID: The parent's Span.id; absent if this the root span in a trace. --// - Annotations: Associates events that explain latency with a timestamp. Unlike log --// statements, annotations are often codes: for example SERVER_RECV(""sr""). --// Annotations are sorted ascending by timestamp. --// - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For --// example, a binary annotation key could be ""http.path"". --// - Debug: True is a request to store this span even if it overrides sampling policy. --// - Timestamp: Epoch microseconds of the start of this span, absent if this an incomplete --// span. --// --// This value should be set directly by instrumentation, using the most --// precise value possible. For example, gettimeofday or syncing nanoTime --// against a tick of currentTimeMillis. --// --// For compatibility with instrumentation that precede this field, collectors --// or span stores can derive this via Annotation.timestamp. --// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. --// --// Timestamp is nullable for input only. Spans without a timestamp cannot be --// presented in a timeline: Span stores should not output spans missing a --// timestamp. --// --// There are two known edge-cases where this could be absent: both cases --// exist when a collector receives a span in parts and a binary annotation --// precedes a timestamp. This is possible when.. --// - The span is in-flight (ex not yet received a timestamp) --// - The span's start event was lost --// - Duration: Measurement in microseconds of the critical path, if known. Durations of --// less than one microsecond must be rounded up to 1 microsecond. --// --// This value should be set directly, as opposed to implicitly via annotation --// timestamps. Doing so encourages precision decoupled from problems of --// clocks, such as skew or NTP updates causing time to move backwards. --// --// For compatibility with instrumentation that precede this field, collectors --// or span stores can derive this by subtracting Annotation.timestamp. --// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. --// --// If this field is persisted as unset, zipkin will continue to work, except --// duration query support will be implementation-specific. Similarly, setting --// this field non-atomically is implementation-specific. --// --// This field is i64 vs i32 to support spans longer than 35 minutes. --// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this --// means the trace uses 128 bit traceIds instead of 64 bit. 
--type Span struct { -- TraceID int64 `thrift:""trace_id,1"" db:""trace_id"" json:""trace_id""` -- // unused field # 2 -- Name string `thrift:""name,3"" db:""name"" json:""name""` -- ID int64 `thrift:""id,4"" db:""id"" json:""id""` -- ParentID *int64 `thrift:""parent_id,5"" db:""parent_id"" json:""parent_id,omitempty""` -- Annotations []*Annotation `thrift:""annotations,6"" db:""annotations"" json:""annotations""` -- // unused field # 7 -- BinaryAnnotations []*BinaryAnnotation `thrift:""binary_annotations,8"" db:""binary_annotations"" json:""binary_annotations""` -- Debug bool `thrift:""debug,9"" db:""debug"" json:""debug,omitempty""` -- Timestamp *int64 `thrift:""timestamp,10"" db:""timestamp"" json:""timestamp,omitempty""` -- Duration *int64 `thrift:""duration,11"" db:""duration"" json:""duration,omitempty""` -- TraceIDHigh *int64 `thrift:""trace_id_high,12"" db:""trace_id_high"" json:""trace_id_high,omitempty""` --} -- --func NewSpan() *Span { -- return &Span{} --} -- -- --func (p *Span) GetTraceID() int64 { -- return p.TraceID --} -- --func (p *Span) GetName() string { -- return p.Name --} -- --func (p *Span) GetID() int64 { -- return p.ID --} --var Span_ParentID_DEFAULT int64 --func (p *Span) GetParentID() int64 { -- if !p.IsSetParentID() { -- return Span_ParentID_DEFAULT -- } --return *p.ParentID --} -- --func (p *Span) GetAnnotations() []*Annotation { -- return p.Annotations --} -- --func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { -- return p.BinaryAnnotations --} --var Span_Debug_DEFAULT bool = false -- --func (p *Span) GetDebug() bool { -- return p.Debug --} --var Span_Timestamp_DEFAULT int64 --func (p *Span) GetTimestamp() int64 { -- if !p.IsSetTimestamp() { -- return Span_Timestamp_DEFAULT -- } --return *p.Timestamp --} --var Span_Duration_DEFAULT int64 --func (p *Span) GetDuration() int64 { -- if !p.IsSetDuration() { -- return Span_Duration_DEFAULT -- } --return *p.Duration --} --var Span_TraceIDHigh_DEFAULT int64 --func (p *Span) GetTraceIDHigh() int64 { -- if !p.IsSetTraceIDHigh() { -- return Span_TraceIDHigh_DEFAULT -- } --return *p.TraceIDHigh --} --func (p *Span) IsSetParentID() bool { -- return p.ParentID != nil --} -- --func (p *Span) IsSetDebug() bool { -- return p.Debug != Span_Debug_DEFAULT --} -- --func (p *Span) IsSetTimestamp() bool { -- return p.Timestamp != nil --} -- --func (p *Span) IsSetDuration() bool { -- return p.Duration != nil --} -- --func (p *Span) IsSetTraceIDHigh() bool { -- return p.TraceIDHigh != nil --} -- --func (p *Span) Read(iprot thrift.TProtocol) error { -- if _, err := iprot.ReadStructBegin(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read error: "", p), err) -- } -- -- -- for { -- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() -- if err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T field %d read error: "", p, fieldId), err) -- } -- if fieldTypeId == thrift.STOP { break; } -- switch fieldId { -- case 1: -- if fieldTypeId == thrift.I64 { -- if err := p.ReadField1(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 3: -- if fieldTypeId == thrift.STRING { -- if err := p.ReadField3(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 4: -- if fieldTypeId == thrift.I64 { -- if err := p.ReadField4(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 5: -- if 
fieldTypeId == thrift.I64 { -- if err := p.ReadField5(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 6: -- if fieldTypeId == thrift.LIST { -- if err := p.ReadField6(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 8: -- if fieldTypeId == thrift.LIST { -- if err := p.ReadField8(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 9: -- if fieldTypeId == thrift.BOOL { -- if err := p.ReadField9(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 10: -- if fieldTypeId == thrift.I64 { -- if err := p.ReadField10(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 11: -- if fieldTypeId == thrift.I64 { -- if err := p.ReadField11(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- case 12: -- if fieldTypeId == thrift.I64 { -- if err := p.ReadField12(iprot); err != nil { -- return err -- } -- } else { -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- default: -- if err := iprot.Skip(fieldTypeId); err != nil { -- return err -- } -- } -- if err := iprot.ReadFieldEnd(); err != nil { -- return err -- } -- } -- if err := iprot.ReadStructEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T read struct end error: "", p), err) -- } -- return nil --} -- --func (p *Span) ReadField1(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI64(); err != nil { -- return thrift.PrependError(""error reading field 1: "", err) --} else { -- p.TraceID = v --} -- return nil --} -- --func (p *Span) ReadField3(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadString(); err != nil { -- return thrift.PrependError(""error reading field 3: "", err) --} else { -- p.Name = v --} -- return nil --} -- --func (p *Span) ReadField4(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI64(); err != nil { -- return thrift.PrependError(""error reading field 4: "", err) --} else { -- p.ID = v --} -- return nil --} -- --func (p *Span) ReadField5(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI64(); err != nil { -- return thrift.PrependError(""error reading field 5: "", err) --} else { -- p.ParentID = &v --} -- return nil --} -- --func (p *Span) ReadField6(iprot thrift.TProtocol) error { -- _, size, err := iprot.ReadListBegin() -- if err != nil { -- return thrift.PrependError(""error reading list begin: "", err) -- } -- tSlice := make([]*Annotation, 0, size) -- p.Annotations = tSlice -- for i := 0; i < size; i ++ { -- _elem0 := &Annotation{} -- if err := _elem0.Read(iprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error reading struct: "", _elem0), err) -- } -- p.Annotations = append(p.Annotations, _elem0) -- } -- if err := iprot.ReadListEnd(); err != nil { -- return thrift.PrependError(""error reading list end: "", err) -- } -- return nil --} -- --func (p *Span) ReadField8(iprot thrift.TProtocol) error { -- _, size, err := iprot.ReadListBegin() -- if err != nil { -- return thrift.PrependError(""error reading list begin: "", err) -- } -- tSlice := make([]*BinaryAnnotation, 0, size) -- p.BinaryAnnotations = tSlice -- for i := 0; i < size; i ++ { -- _elem1 := 
&BinaryAnnotation{} -- if err := _elem1.Read(iprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error reading struct: "", _elem1), err) -- } -- p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) -- } -- if err := iprot.ReadListEnd(); err != nil { -- return thrift.PrependError(""error reading list end: "", err) -- } -- return nil --} -- --func (p *Span) ReadField9(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadBool(); err != nil { -- return thrift.PrependError(""error reading field 9: "", err) --} else { -- p.Debug = v --} -- return nil --} -- --func (p *Span) ReadField10(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI64(); err != nil { -- return thrift.PrependError(""error reading field 10: "", err) --} else { -- p.Timestamp = &v --} -- return nil --} -- --func (p *Span) ReadField11(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI64(); err != nil { -- return thrift.PrependError(""error reading field 11: "", err) --} else { -- p.Duration = &v --} -- return nil --} -- --func (p *Span) ReadField12(iprot thrift.TProtocol) error { -- if v, err := iprot.ReadI64(); err != nil { -- return thrift.PrependError(""error reading field 12: "", err) --} else { -- p.TraceIDHigh = &v --} -- return nil --} -- --func (p *Span) Write(oprot thrift.TProtocol) error { -- if err := oprot.WriteStructBegin(""Span""); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write struct begin error: "", p), err) } -- if p != nil { -- if err := p.writeField1(oprot); err != nil { return err } -- if err := p.writeField3(oprot); err != nil { return err } -- if err := p.writeField4(oprot); err != nil { return err } -- if err := p.writeField5(oprot); err != nil { return err } -- if err := p.writeField6(oprot); err != nil { return err } -- if err := p.writeField8(oprot); err != nil { return err } -- if err := p.writeField9(oprot); err != nil { return err } -- if err := p.writeField10(oprot); err != nil { return err } -- if err := p.writeField11(oprot); err != nil { return err } -- if err := p.writeField12(oprot); err != nil { return err } -- } -- if err := oprot.WriteFieldStop(); err != nil { -- return thrift.PrependError(""write field stop error: "", err) } -- if err := oprot.WriteStructEnd(); err != nil { -- return thrift.PrependError(""write struct stop error: "", err) } -- return nil --} -- --func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""trace_id"", thrift.I64, 1); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 1:trace_id: "", p), err) } -- if err := oprot.WriteI64(int64(p.TraceID)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.trace_id (1) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 1:trace_id: "", p), err) } -- return err --} -- --func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""name"", thrift.STRING, 3); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 3:name: "", p), err) } -- if err := oprot.WriteString(string(p.Name)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.name (3) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 3:name: "", p), err) } -- return err --} -- --func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { -- if err 
:= oprot.WriteFieldBegin(""id"", thrift.I64, 4); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 4:id: "", p), err) } -- if err := oprot.WriteI64(int64(p.ID)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.id (4) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 4:id: "", p), err) } -- return err --} -- --func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { -- if p.IsSetParentID() { -- if err := oprot.WriteFieldBegin(""parent_id"", thrift.I64, 5); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 5:parent_id: "", p), err) } -- if err := oprot.WriteI64(int64(*p.ParentID)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.parent_id (5) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 5:parent_id: "", p), err) } -- } -- return err --} -- --func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""annotations"", thrift.LIST, 6); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 6:annotations: "", p), err) } -- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil { -- return thrift.PrependError(""error writing list begin: "", err) -- } -- for _, v := range p.Annotations { -- if err := v.Write(oprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error writing struct: "", v), err) -- } -- } -- if err := oprot.WriteListEnd(); err != nil { -- return thrift.PrependError(""error writing list end: "", err) -- } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 6:annotations: "", p), err) } -- return err --} -- --func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { -- if err := oprot.WriteFieldBegin(""binary_annotations"", thrift.LIST, 8); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 8:binary_annotations: "", p), err) } -- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { -- return thrift.PrependError(""error writing list begin: "", err) -- } -- for _, v := range p.BinaryAnnotations { -- if err := v.Write(oprot); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T error writing struct: "", v), err) -- } -- } -- if err := oprot.WriteListEnd(); err != nil { -- return thrift.PrependError(""error writing list end: "", err) -- } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 8:binary_annotations: "", p), err) } -- return err --} -- --func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { -- if p.IsSetDebug() { -- if err := oprot.WriteFieldBegin(""debug"", thrift.BOOL, 9); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 9:debug: "", p), err) } -- if err := oprot.WriteBool(bool(p.Debug)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.debug (9) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 9:debug: "", p), err) } -- } -- return err --} -- --func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { -- if p.IsSetTimestamp() { -- if err := oprot.WriteFieldBegin(""timestamp"", thrift.I64, 
10); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 10:timestamp: "", p), err) } -- if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.timestamp (10) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 10:timestamp: "", p), err) } -- } -- return err --} -- --func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { -- if p.IsSetDuration() { -- if err := oprot.WriteFieldBegin(""duration"", thrift.I64, 11); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 11:duration: "", p), err) } -- if err := oprot.WriteI64(int64(*p.Duration)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.duration (11) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 11:duration: "", p), err) } -- } -- return err --} -- --func (p *Span) writeField12(oprot thrift.TProtocol) (err error) { -- if p.IsSetTraceIDHigh() { -- if err := oprot.WriteFieldBegin(""trace_id_high"", thrift.I64, 12); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field begin error 12:trace_id_high: "", p), err) } -- if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T.trace_id_high (12) field write error: "", p), err) } -- if err := oprot.WriteFieldEnd(); err != nil { -- return thrift.PrependError(fmt.Sprintf(""%T write field end error 12:trace_id_high: "", p), err) } -- } -- return err --} -- --func (p *Span) String() string { -- if p == nil { -- return """" -- } -- return fmt.Sprintf(""Span(%+v)"", *p) --} -- -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go -deleted file mode 100644 -index 5754d5fc55b9a..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go -+++ /dev/null -@@ -1,440 +0,0 @@ --package zipkintracer -- --import ( -- ""errors"" -- ""time"" -- -- opentracing ""github.com/opentracing/opentracing-go"" -- ""github.com/opentracing/opentracing-go/ext"" -- -- otobserver ""github.com/opentracing-contrib/go-observer"" -- ""github.com/openzipkin/zipkin-go-opentracing/flag"" --) -- --// ErrInvalidEndpoint will be thrown if hostPort parameter is corrupted or host --// can't be resolved --var ErrInvalidEndpoint = errors.New(""Invalid Endpoint. Please check hostPort parameter"") -- --// Tracer extends the opentracing.Tracer interface with methods to --// probe implementation state, for use by zipkintracer consumers. --type Tracer interface { -- opentracing.Tracer -- -- // Options gets the Options used in New() or NewWithOptions(). -- Options() TracerOptions --} -- --// TracerOptions allows creating a customized Tracer. --type TracerOptions struct { -- // shouldSample is a function which is called when creating a new Span and -- // determines whether that Span is sampled. The randomized TraceID is supplied -- // to allow deterministic sampling decisions to be made across different nodes. -- shouldSample func(traceID uint64) bool -- // trimUnsampledSpans turns potentially expensive operations on unsampled -- // Spans into no-ops. More precisely, tags and log events are silently -- // discarded. If NewSpanEventListener is set, the callbacks will still fire. 
-- trimUnsampledSpans bool -- // recorder receives Spans which have been finished. -- recorder SpanRecorder -- // newSpanEventListener can be used to enhance the tracer by effectively -- // attaching external code to trace events. See NetTraceIntegrator for a -- // practical example, and event.go for the list of possible events. -- newSpanEventListener func() func(SpanEvent) -- // dropAllLogs turns log events on all Spans into no-ops. -- // If NewSpanEventListener is set, the callbacks will still fire. -- dropAllLogs bool -- // MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero -- // value). If a span has more logs than this value, logs are dropped as -- // necessary (and replaced with a log describing how many were dropped). -- // -- // About half of the MaxLogPerSpan logs kept are the oldest logs, and about -- // half are the newest logs. -- // -- // If NewSpanEventListener is set, the callbacks will still fire for all log -- // events. This value is ignored if DropAllLogs is true. -- maxLogsPerSpan int -- // debugAssertSingleGoroutine internally records the ID of the goroutine -- // creating each Span and verifies that no operation is carried out on -- // it on a different goroutine. -- // Provided strictly for development purposes. -- // Passing Spans between goroutine without proper synchronization often -- // results in use-after-Finish() errors. For a simple example, consider the -- // following pseudocode: -- // -- // func (s *Server) Handle(req http.Request) error { -- // sp := s.StartSpan(""server"") -- // defer sp.Finish() -- // wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req) -- // select { -- // case resp := <-wait: -- // return resp.Error -- // case <-time.After(10*time.Second): -- // sp.LogEvent(""timed out waiting for processing"") -- // return ErrTimedOut -- // } -- // } -- // -- // This looks reasonable at first, but a request which spends more than ten -- // seconds in the queue is abandoned by the main goroutine and its trace -- // finished, leading to use-after-finish when the request is finally -- // processed. Note also that even joining on to a finished Span via -- // StartSpanWithOptions constitutes an illegal operation. -- // -- // Code bases which do not require (or decide they do not want) Spans to -- // be passed across goroutine boundaries can run with this flag enabled in -- // tests to increase their chances of spotting wrong-doers. -- debugAssertSingleGoroutine bool -- // debugAssertUseAfterFinish is provided strictly for development purposes. -- // When set, it attempts to exacerbate issues emanating from use of Spans -- // after calling Finish by running additional assertions. -- debugAssertUseAfterFinish bool -- // enableSpanPool enables the use of a pool, so that the tracer reuses spans -- // after Finish has been called on it. Adds a slight performance gain as it -- // reduces allocations. However, if you have any use-after-finish race -- // conditions the code may panic. -- enableSpanPool bool -- // logger ... -- logger Logger -- // clientServerSameSpan allows for Zipkin V1 style span per RPC. This places -- // both client end and server end of a RPC call into the same span. -- clientServerSameSpan bool -- // debugMode activates Zipkin's debug request allowing for all Spans originating -- // from this tracer to pass through and bypass sampling. Use with extreme care -- // as it might flood your system if you have many traces starting from the -- // service you are instrumenting. 
-- debugMode bool -- // traceID128Bit enables the generation of 128 bit traceIDs in case the tracer -- // needs to create a root span. By default regular 64 bit traceIDs are used. -- // Regardless of this setting, the library will propagate and support both -- // 64 and 128 bit incoming traces from upstream sources. -- traceID128Bit bool -- -- observer otobserver.Observer --} -- --// TracerOption allows for functional options. --// See: http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis --type TracerOption func(opts *TracerOptions) error -- --// WithSampler allows one to add a Sampler function --func WithSampler(sampler Sampler) TracerOption { -- return func(opts *TracerOptions) error { -- opts.shouldSample = sampler -- return nil -- } --} -- --// TrimUnsampledSpans option --func TrimUnsampledSpans(trim bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.trimUnsampledSpans = trim -- return nil -- } --} -- --// DropAllLogs option --func DropAllLogs(dropAllLogs bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.dropAllLogs = dropAllLogs -- return nil -- } --} -- --// WithLogger option --func WithLogger(logger Logger) TracerOption { -- return func(opts *TracerOptions) error { -- opts.logger = logger -- return nil -- } --} -- --// DebugAssertSingleGoroutine option --func DebugAssertSingleGoroutine(val bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.debugAssertSingleGoroutine = val -- return nil -- } --} -- --// DebugAssertUseAfterFinish option --func DebugAssertUseAfterFinish(val bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.debugAssertUseAfterFinish = val -- return nil -- } --} -- --// TraceID128Bit option --func TraceID128Bit(val bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.traceID128Bit = val -- return nil -- } --} -- --// ClientServerSameSpan allows to place client-side and server-side annotations --// for a RPC call in the same span (Zipkin V1 behavior) or different spans --// (more in line with other tracing solutions). By default this Tracer --// uses shared host spans (so client-side and server-side in the same span). --// If using separate spans you might run into trouble with Zipkin V1 as clock --// skew issues can't be remedied at Zipkin server side. --func ClientServerSameSpan(val bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.clientServerSameSpan = val -- return nil -- } --} -- --// DebugMode allows to set the tracer to Zipkin debug mode --func DebugMode(val bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.debugMode = val -- return nil -- } --} -- --// EnableSpanPool ... --func EnableSpanPool(val bool) TracerOption { -- return func(opts *TracerOptions) error { -- opts.enableSpanPool = val -- return nil -- } --} -- --// NewSpanEventListener option --func NewSpanEventListener(f func() func(SpanEvent)) TracerOption { -- return func(opts *TracerOptions) error { -- opts.newSpanEventListener = f -- return nil -- } --} -- --// WithMaxLogsPerSpan option --func WithMaxLogsPerSpan(limit int) TracerOption { -- return func(opts *TracerOptions) error { -- if limit < 5 || limit > 10000 { -- return errors.New(""invalid MaxLogsPerSpan limit. Should be between 5 and 10000"") -- } -- opts.maxLogsPerSpan = limit -- return nil -- } --} -- --// NewTracer creates a new OpenTracing compatible Zipkin Tracer. 
--func NewTracer(recorder SpanRecorder, options ...TracerOption) (opentracing.Tracer, error) { -- opts := &TracerOptions{ -- recorder: recorder, -- shouldSample: alwaysSample, -- trimUnsampledSpans: false, -- newSpanEventListener: func() func(SpanEvent) { return nil }, -- logger: &nopLogger{}, -- debugAssertSingleGoroutine: false, -- debugAssertUseAfterFinish: false, -- clientServerSameSpan: true, -- debugMode: false, -- traceID128Bit: false, -- maxLogsPerSpan: 10000, -- observer: nil, -- } -- for _, o := range options { -- err := o(opts) -- if err != nil { -- return nil, err -- } -- } -- rval := &tracerImpl{options: *opts} -- rval.textPropagator = &textMapPropagator{rval} -- rval.binaryPropagator = &binaryPropagator{rval} -- rval.accessorPropagator = &accessorPropagator{rval} -- return rval, nil --} -- --// Implements the `Tracer` interface. --type tracerImpl struct { -- options TracerOptions -- textPropagator *textMapPropagator -- binaryPropagator *binaryPropagator -- accessorPropagator *accessorPropagator --} -- --func (t *tracerImpl) StartSpan( -- operationName string, -- opts ...opentracing.StartSpanOption, --) opentracing.Span { -- sso := opentracing.StartSpanOptions{} -- for _, o := range opts { -- o.Apply(&sso) -- } -- return t.startSpanWithOptions(operationName, sso) --} -- --func (t *tracerImpl) getSpan() *spanImpl { -- if t.options.enableSpanPool { -- sp := spanPool.Get().(*spanImpl) -- sp.reset() -- return sp -- } -- return &spanImpl{} --} -- --func (t *tracerImpl) startSpanWithOptions( -- operationName string, -- opts opentracing.StartSpanOptions, --) opentracing.Span { -- // Start time. -- startTime := opts.StartTime -- if startTime.IsZero() { -- startTime = time.Now() -- } -- -- // Tags. -- tags := opts.Tags -- -- // Build the new span. This is the only allocation: We'll return this as -- // an opentracing.Span. -- sp := t.getSpan() -- -- if t.options.observer != nil { -- sp.observer, _ = t.options.observer.OnStartSpan(sp, operationName, opts) -- } -- -- // Look for a parent in the list of References. -- // -- // TODO: would be nice if basictracer did something with all -- // References, not just the first one. 
--ReferencesLoop: -- for _, ref := range opts.References { -- switch ref.Type { -- case opentracing.ChildOfRef: -- refCtx := ref.ReferencedContext.(SpanContext) -- sp.raw.Context.TraceID = refCtx.TraceID -- sp.raw.Context.ParentSpanID = &refCtx.SpanID -- sp.raw.Context.Sampled = refCtx.Sampled -- sp.raw.Context.Flags = refCtx.Flags -- sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed -- -- if t.options.clientServerSameSpan && -- tags[string(ext.SpanKind)] == ext.SpanKindRPCServer.Value { -- sp.raw.Context.SpanID = refCtx.SpanID -- sp.raw.Context.ParentSpanID = refCtx.ParentSpanID -- sp.raw.Context.Owner = false -- } else { -- sp.raw.Context.SpanID = randomID() -- sp.raw.Context.ParentSpanID = &refCtx.SpanID -- sp.raw.Context.Owner = true -- } -- -- if l := len(refCtx.Baggage); l > 0 { -- sp.raw.Context.Baggage = make(map[string]string, l) -- for k, v := range refCtx.Baggage { -- sp.raw.Context.Baggage[k] = v -- } -- } -- break ReferencesLoop -- case opentracing.FollowsFromRef: -- refCtx := ref.ReferencedContext.(SpanContext) -- sp.raw.Context.TraceID = refCtx.TraceID -- sp.raw.Context.ParentSpanID = &refCtx.SpanID -- sp.raw.Context.Sampled = refCtx.Sampled -- sp.raw.Context.Flags = refCtx.Flags -- sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed -- -- sp.raw.Context.SpanID = randomID() -- sp.raw.Context.ParentSpanID = &refCtx.SpanID -- sp.raw.Context.Owner = true -- -- if l := len(refCtx.Baggage); l > 0 { -- sp.raw.Context.Baggage = make(map[string]string, l) -- for k, v := range refCtx.Baggage { -- sp.raw.Context.Baggage[k] = v -- } -- } -- break ReferencesLoop -- } -- } -- if sp.raw.Context.TraceID.Empty() { -- // No parent Span found; allocate new trace and span ids and determine -- // the Sampled status. -- if t.options.traceID128Bit { -- sp.raw.Context.TraceID.High = randomID() -- } -- sp.raw.Context.TraceID.Low, sp.raw.Context.SpanID = randomID2() -- sp.raw.Context.Sampled = t.options.shouldSample(sp.raw.Context.TraceID.Low) -- sp.raw.Context.Flags = flag.IsRoot -- sp.raw.Context.Owner = true -- } -- if t.options.debugMode { -- sp.raw.Context.Flags |= flag.Debug -- } -- return t.startSpanInternal( -- sp, -- operationName, -- startTime, -- tags, -- ) --} -- --func (t *tracerImpl) startSpanInternal( -- sp *spanImpl, -- operationName string, -- startTime time.Time, -- tags opentracing.Tags, --) opentracing.Span { -- sp.tracer = t -- if t.options.newSpanEventListener != nil { -- sp.event = t.options.newSpanEventListener() -- } -- sp.raw.Operation = operationName -- sp.raw.Start = startTime -- sp.raw.Duration = -1 -- sp.raw.Tags = tags -- -- if t.options.debugAssertSingleGoroutine { -- sp.SetTag(debugGoroutineIDTag, curGoroutineID()) -- } -- defer sp.onCreate(operationName) -- return sp --} -- --type delegatorType struct{} -- --// Delegator is the format to use for DelegatingCarrier. 
--var Delegator delegatorType -- --func (t *tracerImpl) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error { -- switch format { -- case opentracing.TextMap, opentracing.HTTPHeaders: -- return t.textPropagator.Inject(sc, carrier) -- case opentracing.Binary: -- return t.binaryPropagator.Inject(sc, carrier) -- } -- if _, ok := format.(delegatorType); ok { -- return t.accessorPropagator.Inject(sc, carrier) -- } -- return opentracing.ErrUnsupportedFormat --} -- --func (t *tracerImpl) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { -- switch format { -- case opentracing.TextMap, opentracing.HTTPHeaders: -- return t.textPropagator.Extract(carrier) -- case opentracing.Binary: -- return t.binaryPropagator.Extract(carrier) -- } -- if _, ok := format.(delegatorType); ok { -- return t.accessorPropagator.Extract(carrier) -- } -- return nil, opentracing.ErrUnsupportedFormat --} -- --func (t *tracerImpl) Options() TracerOptions { -- return t.options --} -- --// WithObserver assigns an initialized observer to opts.observer --func WithObserver(observer otobserver.Observer) TracerOption { -- return func(opts *TracerOptions) error { -- opts.observer = observer -- return nil -- } --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go -deleted file mode 100644 -index a8058ba45d60c..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go -+++ /dev/null -@@ -1,38 +0,0 @@ --package types -- --import ( -- ""fmt"" -- ""strconv"" --) -- --// TraceID is a 128 bit number internally stored as 2x uint64 (high & low). --type TraceID struct { -- High uint64 -- Low uint64 --} -- --// TraceIDFromHex returns the TraceID from a Hex string. --func TraceIDFromHex(h string) (t TraceID, err error) { -- if len(h) > 16 { -- if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil { -- return -- } -- t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64) -- return -- } -- t.Low, err = strconv.ParseUint(h, 16, 64) -- return --} -- --// ToHex outputs the 128-bit traceID as hex string. --func (t TraceID) ToHex() string { -- if t.High == 0 { -- return fmt.Sprintf(""%016x"", t.Low) -- } -- return fmt.Sprintf(""%016x%016x"", t.High, t.Low) --} -- --// Empty returns if TraceID has zero value --func (t TraceID) Empty() bool { -- return t.Low == 0 && t.High == 0 --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go -deleted file mode 100644 -index 27066150222f5..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go -+++ /dev/null -@@ -1,25 +0,0 @@ --package zipkintracer -- --import ( -- ""math/rand"" -- ""sync"" -- ""time"" --) -- --var ( -- seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) -- // The golang rand generators are *not* intrinsically thread-safe. 
-- seededIDLock sync.Mutex --) -- --func randomID() uint64 { -- seededIDLock.Lock() -- defer seededIDLock.Unlock() -- return uint64(seededIDGen.Int63()) --} -- --func randomID2() (uint64, uint64) { -- seededIDLock.Lock() -- defer seededIDLock.Unlock() -- return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63()) --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go -deleted file mode 100644 -index 79364998ced2c..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go -+++ /dev/null -@@ -1,65 +0,0 @@ --package wire -- --import ( -- ""github.com/openzipkin/zipkin-go-opentracing/flag"" -- ""github.com/openzipkin/zipkin-go-opentracing/types"" --) -- --// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as the --// the underlying datastructure. The reason for implementing DelagatingCarrier --// is to allow for end users to serialize the underlying protocol buffers using --// jsonpb or any other serialization forms they want. --type ProtobufCarrier TracerState -- --// SetState set's the tracer state. --func (p *ProtobufCarrier) SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) { -- p.TraceId = traceID.Low -- p.TraceIdHigh = traceID.High -- p.SpanId = spanID -- if parentSpanID == nil { -- flags |= flag.IsRoot -- p.ParentSpanId = 0 -- } else { -- flags &^= flag.IsRoot -- p.ParentSpanId = *parentSpanID -- } -- flags |= flag.SamplingSet -- if sampled { -- flags |= flag.Sampled -- p.Sampled = sampled -- } else { -- flags &^= flag.Sampled -- } -- p.Flags = uint64(flags) --} -- --// State returns the tracer state. --func (p *ProtobufCarrier) State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) { -- traceID.Low = p.TraceId -- traceID.High = p.TraceIdHigh -- spanID = p.SpanId -- sampled = p.Sampled -- flags = flag.Flags(p.Flags) -- if flags&flag.IsRoot == 0 { -- parentSpanID = &p.ParentSpanId -- } -- return traceID, spanID, parentSpanID, sampled, flags --} -- --// SetBaggageItem sets a baggage item. --func (p *ProtobufCarrier) SetBaggageItem(key, value string) { -- if p.BaggageItems == nil { -- p.BaggageItems = map[string]string{key: value} -- return -- } -- -- p.BaggageItems[key] = value --} -- --// GetBaggage iterates over each baggage item and executes the callback with --// the key:value pair. --func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) { -- for k, v := range p.BaggageItems { -- f(k, v) -- } --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go -deleted file mode 100644 -index 0eb355bab17d3..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go -+++ /dev/null -@@ -1,6 +0,0 @@ --package wire -- --//go:generate protoc --gogofaster_out=$GOPATH/src wire.proto -- --// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the --// gogofaster generator binary. -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go -deleted file mode 100644 -index 9aa7b2c908c83..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go -+++ /dev/null -@@ -1,649 +0,0 @@ --// Code generated by protoc-gen-gogo. --// source: wire.proto --// DO NOT EDIT! 
-- --/* -- Package wire is a generated protocol buffer package. -- -- It is generated from these files: -- wire.proto -- -- It has these top-level messages: -- TracerState --*/ --package wire -- --import proto ""github.com/gogo/protobuf/proto"" --import fmt ""fmt"" --import math ""math"" -- --import io ""io"" -- --// Reference imports to suppress errors if they are not otherwise used. --var _ = proto.Marshal --var _ = fmt.Errorf --var _ = math.Inf -- --// This is a compile-time assertion to ensure that this generated file --// is compatible with the proto package it is being compiled against. --// A compilation error at this line likely means your copy of the --// proto package needs to be updated. --const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -- --type TracerState struct { -- TraceId uint64 `protobuf:""fixed64,1,opt,name=trace_id,json=traceId,proto3"" json:""trace_id,omitempty""` -- SpanId uint64 `protobuf:""fixed64,2,opt,name=span_id,json=spanId,proto3"" json:""span_id,omitempty""` -- Sampled bool `protobuf:""varint,3,opt,name=sampled,proto3"" json:""sampled,omitempty""` -- BaggageItems map[string]string `protobuf:""bytes,4,rep,name=baggage_items,json=baggageItems"" json:""baggage_items,omitempty"" protobuf_key:""bytes,1,opt,name=key,proto3"" protobuf_val:""bytes,2,opt,name=value,proto3""` -- TraceIdHigh uint64 `protobuf:""fixed64,20,opt,name=trace_id_high,json=traceIdHigh,proto3"" json:""trace_id_high,omitempty""` -- ParentSpanId uint64 `protobuf:""fixed64,21,opt,name=parent_span_id,json=parentSpanId,proto3"" json:""parent_span_id,omitempty""` -- Flags uint64 `protobuf:""fixed64,22,opt,name=flags,proto3"" json:""flags,omitempty""` --} -- --func (m *TracerState) Reset() { *m = TracerState{} } --func (m *TracerState) String() string { return proto.CompactTextString(m) } --func (*TracerState) ProtoMessage() {} --func (*TracerState) Descriptor() ([]byte, []int) { return fileDescriptorWire, []int{0} } -- --func (m *TracerState) GetTraceId() uint64 { -- if m != nil { -- return m.TraceId -- } -- return 0 --} -- --func (m *TracerState) GetSpanId() uint64 { -- if m != nil { -- return m.SpanId -- } -- return 0 --} -- --func (m *TracerState) GetSampled() bool { -- if m != nil { -- return m.Sampled -- } -- return false --} -- --func (m *TracerState) GetBaggageItems() map[string]string { -- if m != nil { -- return m.BaggageItems -- } -- return nil --} -- --func (m *TracerState) GetTraceIdHigh() uint64 { -- if m != nil { -- return m.TraceIdHigh -- } -- return 0 --} -- --func (m *TracerState) GetParentSpanId() uint64 { -- if m != nil { -- return m.ParentSpanId -- } -- return 0 --} -- --func (m *TracerState) GetFlags() uint64 { -- if m != nil { -- return m.Flags -- } -- return 0 --} -- --func init() { -- proto.RegisterType((*TracerState)(nil), ""zipkintracer_go.wire.TracerState"") --} --func (m *TracerState) Marshal() (dAtA []byte, err error) { -- size := m.Size() -- dAtA = make([]byte, size) -- n, err := m.MarshalTo(dAtA) -- if err != nil { -- return nil, err -- } -- return dAtA[:n], nil --} -- --func (m *TracerState) MarshalTo(dAtA []byte) (int, error) { -- var i int -- _ = i -- var l int -- _ = l -- if m.TraceId != 0 { -- dAtA[i] = 0x9 -- i++ -- i = encodeFixed64Wire(dAtA, i, uint64(m.TraceId)) -- } -- if m.SpanId != 0 { -- dAtA[i] = 0x11 -- i++ -- i = encodeFixed64Wire(dAtA, i, uint64(m.SpanId)) -- } -- if m.Sampled { -- dAtA[i] = 0x18 -- i++ -- if m.Sampled { -- dAtA[i] = 1 -- } else { -- dAtA[i] = 0 -- } -- i++ -- } -- if len(m.BaggageItems) > 0 { -- for k, _ := 
range m.BaggageItems { -- dAtA[i] = 0x22 -- i++ -- v := m.BaggageItems[k] -- mapSize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v))) -- i = encodeVarintWire(dAtA, i, uint64(mapSize)) -- dAtA[i] = 0xa -- i++ -- i = encodeVarintWire(dAtA, i, uint64(len(k))) -- i += copy(dAtA[i:], k) -- dAtA[i] = 0x12 -- i++ -- i = encodeVarintWire(dAtA, i, uint64(len(v))) -- i += copy(dAtA[i:], v) -- } -- } -- if m.TraceIdHigh != 0 { -- dAtA[i] = 0xa1 -- i++ -- dAtA[i] = 0x1 -- i++ -- i = encodeFixed64Wire(dAtA, i, uint64(m.TraceIdHigh)) -- } -- if m.ParentSpanId != 0 { -- dAtA[i] = 0xa9 -- i++ -- dAtA[i] = 0x1 -- i++ -- i = encodeFixed64Wire(dAtA, i, uint64(m.ParentSpanId)) -- } -- if m.Flags != 0 { -- dAtA[i] = 0xb1 -- i++ -- dAtA[i] = 0x1 -- i++ -- i = encodeFixed64Wire(dAtA, i, uint64(m.Flags)) -- } -- return i, nil --} -- --func encodeFixed64Wire(dAtA []byte, offset int, v uint64) int { -- dAtA[offset] = uint8(v) -- dAtA[offset+1] = uint8(v >> 8) -- dAtA[offset+2] = uint8(v >> 16) -- dAtA[offset+3] = uint8(v >> 24) -- dAtA[offset+4] = uint8(v >> 32) -- dAtA[offset+5] = uint8(v >> 40) -- dAtA[offset+6] = uint8(v >> 48) -- dAtA[offset+7] = uint8(v >> 56) -- return offset + 8 --} --func encodeFixed32Wire(dAtA []byte, offset int, v uint32) int { -- dAtA[offset] = uint8(v) -- dAtA[offset+1] = uint8(v >> 8) -- dAtA[offset+2] = uint8(v >> 16) -- dAtA[offset+3] = uint8(v >> 24) -- return offset + 4 --} --func encodeVarintWire(dAtA []byte, offset int, v uint64) int { -- for v >= 1<<7 { -- dAtA[offset] = uint8(v&0x7f | 0x80) -- v >>= 7 -- offset++ -- } -- dAtA[offset] = uint8(v) -- return offset + 1 --} --func (m *TracerState) Size() (n int) { -- var l int -- _ = l -- if m.TraceId != 0 { -- n += 9 -- } -- if m.SpanId != 0 { -- n += 9 -- } -- if m.Sampled { -- n += 2 -- } -- if len(m.BaggageItems) > 0 { -- for k, v := range m.BaggageItems { -- _ = k -- _ = v -- mapEntrySize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v))) -- n += mapEntrySize + 1 + sovWire(uint64(mapEntrySize)) -- } -- } -- if m.TraceIdHigh != 0 { -- n += 10 -- } -- if m.ParentSpanId != 0 { -- n += 10 -- } -- if m.Flags != 0 { -- n += 10 -- } -- return n --} -- --func sovWire(x uint64) (n int) { -- for { -- n++ -- x >>= 7 -- if x == 0 { -- break -- } -- } -- return n --} --func sozWire(x uint64) (n int) { -- return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) --} --func (m *TracerState) Unmarshal(dAtA []byte) error { -- l := len(dAtA) -- iNdEx := 0 -- for iNdEx < l { -- preIndex := iNdEx -- var wire uint64 -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowWire -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- wire |= (uint64(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- fieldNum := int32(wire >> 3) -- wireType := int(wire & 0x7) -- if wireType == 4 { -- return fmt.Errorf(""proto: TracerState: wiretype end group for non-group"") -- } -- if fieldNum <= 0 { -- return fmt.Errorf(""proto: TracerState: illegal tag %d (wire type %d)"", fieldNum, wire) -- } -- switch fieldNum { -- case 1: -- if wireType != 1 { -- return fmt.Errorf(""proto: wrong wireType = %d for field TraceId"", wireType) -- } -- m.TraceId = 0 -- if (iNdEx + 8) > l { -- return io.ErrUnexpectedEOF -- } -- iNdEx += 8 -- m.TraceId = uint64(dAtA[iNdEx-8]) -- m.TraceId |= uint64(dAtA[iNdEx-7]) << 8 -- m.TraceId |= uint64(dAtA[iNdEx-6]) << 16 -- m.TraceId |= uint64(dAtA[iNdEx-5]) << 24 -- m.TraceId |= uint64(dAtA[iNdEx-4]) << 32 -- 
m.TraceId |= uint64(dAtA[iNdEx-3]) << 40 -- m.TraceId |= uint64(dAtA[iNdEx-2]) << 48 -- m.TraceId |= uint64(dAtA[iNdEx-1]) << 56 -- case 2: -- if wireType != 1 { -- return fmt.Errorf(""proto: wrong wireType = %d for field SpanId"", wireType) -- } -- m.SpanId = 0 -- if (iNdEx + 8) > l { -- return io.ErrUnexpectedEOF -- } -- iNdEx += 8 -- m.SpanId = uint64(dAtA[iNdEx-8]) -- m.SpanId |= uint64(dAtA[iNdEx-7]) << 8 -- m.SpanId |= uint64(dAtA[iNdEx-6]) << 16 -- m.SpanId |= uint64(dAtA[iNdEx-5]) << 24 -- m.SpanId |= uint64(dAtA[iNdEx-4]) << 32 -- m.SpanId |= uint64(dAtA[iNdEx-3]) << 40 -- m.SpanId |= uint64(dAtA[iNdEx-2]) << 48 -- m.SpanId |= uint64(dAtA[iNdEx-1]) << 56 -- case 3: -- if wireType != 0 { -- return fmt.Errorf(""proto: wrong wireType = %d for field Sampled"", wireType) -- } -- var v int -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowWire -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- v |= (int(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- m.Sampled = bool(v != 0) -- case 4: -- if wireType != 2 { -- return fmt.Errorf(""proto: wrong wireType = %d for field BaggageItems"", wireType) -- } -- var msglen int -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowWire -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- msglen |= (int(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- if msglen < 0 { -- return ErrInvalidLengthWire -- } -- postIndex := iNdEx + msglen -- if postIndex > l { -- return io.ErrUnexpectedEOF -- } -- var keykey uint64 -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowWire -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- keykey |= (uint64(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- var stringLenmapkey uint64 -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowWire -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- stringLenmapkey |= (uint64(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- intStringLenmapkey := int(stringLenmapkey) -- if intStringLenmapkey < 0 { -- return ErrInvalidLengthWire -- } -- postStringIndexmapkey := iNdEx + intStringLenmapkey -- if postStringIndexmapkey > l { -- return io.ErrUnexpectedEOF -- } -- mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) -- iNdEx = postStringIndexmapkey -- if m.BaggageItems == nil { -- m.BaggageItems = make(map[string]string) -- } -- if iNdEx < postIndex { -- var valuekey uint64 -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowWire -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- valuekey |= (uint64(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- var stringLenmapvalue uint64 -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return ErrIntOverflowWire -- } -- if iNdEx >= l { -- return io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- stringLenmapvalue |= (uint64(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- intStringLenmapvalue := int(stringLenmapvalue) -- if intStringLenmapvalue < 0 { -- return ErrInvalidLengthWire -- } -- postStringIndexmapvalue := iNdEx + intStringLenmapvalue -- if postStringIndexmapvalue > l { -- return io.ErrUnexpectedEOF -- } -- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) -- iNdEx = postStringIndexmapvalue -- 
m.BaggageItems[mapkey] = mapvalue -- } else { -- var mapvalue string -- m.BaggageItems[mapkey] = mapvalue -- } -- iNdEx = postIndex -- case 20: -- if wireType != 1 { -- return fmt.Errorf(""proto: wrong wireType = %d for field TraceIdHigh"", wireType) -- } -- m.TraceIdHigh = 0 -- if (iNdEx + 8) > l { -- return io.ErrUnexpectedEOF -- } -- iNdEx += 8 -- m.TraceIdHigh = uint64(dAtA[iNdEx-8]) -- m.TraceIdHigh |= uint64(dAtA[iNdEx-7]) << 8 -- m.TraceIdHigh |= uint64(dAtA[iNdEx-6]) << 16 -- m.TraceIdHigh |= uint64(dAtA[iNdEx-5]) << 24 -- m.TraceIdHigh |= uint64(dAtA[iNdEx-4]) << 32 -- m.TraceIdHigh |= uint64(dAtA[iNdEx-3]) << 40 -- m.TraceIdHigh |= uint64(dAtA[iNdEx-2]) << 48 -- m.TraceIdHigh |= uint64(dAtA[iNdEx-1]) << 56 -- case 21: -- if wireType != 1 { -- return fmt.Errorf(""proto: wrong wireType = %d for field ParentSpanId"", wireType) -- } -- m.ParentSpanId = 0 -- if (iNdEx + 8) > l { -- return io.ErrUnexpectedEOF -- } -- iNdEx += 8 -- m.ParentSpanId = uint64(dAtA[iNdEx-8]) -- m.ParentSpanId |= uint64(dAtA[iNdEx-7]) << 8 -- m.ParentSpanId |= uint64(dAtA[iNdEx-6]) << 16 -- m.ParentSpanId |= uint64(dAtA[iNdEx-5]) << 24 -- m.ParentSpanId |= uint64(dAtA[iNdEx-4]) << 32 -- m.ParentSpanId |= uint64(dAtA[iNdEx-3]) << 40 -- m.ParentSpanId |= uint64(dAtA[iNdEx-2]) << 48 -- m.ParentSpanId |= uint64(dAtA[iNdEx-1]) << 56 -- case 22: -- if wireType != 1 { -- return fmt.Errorf(""proto: wrong wireType = %d for field Flags"", wireType) -- } -- m.Flags = 0 -- if (iNdEx + 8) > l { -- return io.ErrUnexpectedEOF -- } -- iNdEx += 8 -- m.Flags = uint64(dAtA[iNdEx-8]) -- m.Flags |= uint64(dAtA[iNdEx-7]) << 8 -- m.Flags |= uint64(dAtA[iNdEx-6]) << 16 -- m.Flags |= uint64(dAtA[iNdEx-5]) << 24 -- m.Flags |= uint64(dAtA[iNdEx-4]) << 32 -- m.Flags |= uint64(dAtA[iNdEx-3]) << 40 -- m.Flags |= uint64(dAtA[iNdEx-2]) << 48 -- m.Flags |= uint64(dAtA[iNdEx-1]) << 56 -- default: -- iNdEx = preIndex -- skippy, err := skipWire(dAtA[iNdEx:]) -- if err != nil { -- return err -- } -- if skippy < 0 { -- return ErrInvalidLengthWire -- } -- if (iNdEx + skippy) > l { -- return io.ErrUnexpectedEOF -- } -- iNdEx += skippy -- } -- } -- -- if iNdEx > l { -- return io.ErrUnexpectedEOF -- } -- return nil --} --func skipWire(dAtA []byte) (n int, err error) { -- l := len(dAtA) -- iNdEx := 0 -- for iNdEx < l { -- var wire uint64 -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return 0, ErrIntOverflowWire -- } -- if iNdEx >= l { -- return 0, io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- wire |= (uint64(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- wireType := int(wire & 0x7) -- switch wireType { -- case 0: -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return 0, ErrIntOverflowWire -- } -- if iNdEx >= l { -- return 0, io.ErrUnexpectedEOF -- } -- iNdEx++ -- if dAtA[iNdEx-1] < 0x80 { -- break -- } -- } -- return iNdEx, nil -- case 1: -- iNdEx += 8 -- return iNdEx, nil -- case 2: -- var length int -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return 0, ErrIntOverflowWire -- } -- if iNdEx >= l { -- return 0, io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- iNdEx++ -- length |= (int(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- iNdEx += length -- if length < 0 { -- return 0, ErrInvalidLengthWire -- } -- return iNdEx, nil -- case 3: -- for { -- var innerWire uint64 -- var start int = iNdEx -- for shift := uint(0); ; shift += 7 { -- if shift >= 64 { -- return 0, ErrIntOverflowWire -- } -- if iNdEx >= l { -- return 0, io.ErrUnexpectedEOF -- } -- b := dAtA[iNdEx] -- 
iNdEx++ -- innerWire |= (uint64(b) & 0x7F) << shift -- if b < 0x80 { -- break -- } -- } -- innerWireType := int(innerWire & 0x7) -- if innerWireType == 4 { -- break -- } -- next, err := skipWire(dAtA[start:]) -- if err != nil { -- return 0, err -- } -- iNdEx = start + next -- } -- return iNdEx, nil -- case 4: -- return iNdEx, nil -- case 5: -- iNdEx += 4 -- return iNdEx, nil -- default: -- return 0, fmt.Errorf(""proto: illegal wireType %d"", wireType) -- } -- } -- panic(""unreachable"") --} -- --var ( -- ErrInvalidLengthWire = fmt.Errorf(""proto: negative length found during unmarshaling"") -- ErrIntOverflowWire = fmt.Errorf(""proto: integer overflow"") --) -- --func init() { proto.RegisterFile(""wire.proto"", fileDescriptorWire) } -- --var fileDescriptorWire = []byte{ -- // 325 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4a, 0x03, 0x31, -- 0x18, 0xc4, 0x4d, 0xab, 0xfd, 0xf3, 0xb5, 0x15, 0x0d, 0x55, 0x57, 0x0f, 0x4b, 0x29, 0x1e, 0x7a, -- 0xe9, 0x56, 0xec, 0x45, 0xbc, 0x08, 0x05, 0xc1, 0x5e, 0xb7, 0x1e, 0xc4, 0xcb, 0x92, 0xed, 0xc6, -- 0x6c, 0x68, 0x9b, 0x84, 0x6c, 0xaa, 0xd4, 0xa7, 0xf0, 0xb1, 0x3c, 0x7a, 0xf2, 0x2c, 0xf5, 0x45, -- 0x24, 0x89, 0x85, 0x82, 0x9e, 0x76, 0x67, 0xe6, 0x1b, 0xf8, 0x31, 0x01, 0x78, 0xe1, 0x9a, 0x46, -- 0x4a, 0x4b, 0x23, 0x71, 0xfb, 0x95, 0xab, 0x19, 0x17, 0x46, 0x93, 0x29, 0xd5, 0x09, 0x93, 0x91, -- 0xcd, 0xba, 0x9f, 0x25, 0x68, 0xdc, 0x3b, 0x6b, 0x62, 0x88, 0xa1, 0xf8, 0x14, 0x6a, 0xee, 0x22, -- 0xe1, 0x59, 0x80, 0x3a, 0xa8, 0x57, 0x89, 0xab, 0x4e, 0x8f, 0x33, 0x7c, 0x02, 0xd5, 0x42, 0x11, -- 0x61, 0x93, 0x92, 0x4b, 0x2a, 0x56, 0x8e, 0x33, 0x1c, 0x40, 0xb5, 0x20, 0x0b, 0x35, 0xa7, 0x59, -- 0x50, 0xee, 0xa0, 0x5e, 0x2d, 0xde, 0x48, 0xfc, 0x00, 0xad, 0x94, 0x30, 0x46, 0x18, 0x4d, 0xb8, -- 0xa1, 0x8b, 0x22, 0xd8, 0xed, 0x94, 0x7b, 0x8d, 0xcb, 0x61, 0xf4, 0x1f, 0x4b, 0xb4, 0xc5, 0x11, -- 0x8d, 0x7c, 0x6d, 0x6c, 0x5b, 0xb7, 0xc2, 0xe8, 0x55, 0xdc, 0x4c, 0xb7, 0x2c, 0xdc, 0x85, 0xd6, -- 0x86, 0x33, 0xc9, 0x39, 0xcb, 0x83, 0xb6, 0x43, 0x6a, 0xfc, 0xc2, 0xde, 0x71, 0x96, 0xe3, 0x73, -- 0xd8, 0x57, 0x44, 0x53, 0x61, 0x92, 0x0d, 0xf7, 0x91, 0x3b, 0x6a, 0x7a, 0x77, 0xe2, 0xe9, 0xdb, -- 0xb0, 0xf7, 0x34, 0x27, 0xac, 0x08, 0x8e, 0x5d, 0xe8, 0xc5, 0xd9, 0x0d, 0x1c, 0xfe, 0x41, 0xc0, -- 0x07, 0x50, 0x9e, 0xd1, 0x95, 0xdb, 0xa5, 0x1e, 0xdb, 0x5f, 0x5b, 0x7e, 0x26, 0xf3, 0x25, 0x75, -- 0x8b, 0xd4, 0x63, 0x2f, 0xae, 0x4b, 0x57, 0x68, 0x34, 0x7a, 0x5f, 0x87, 0xe8, 0x63, 0x1d, 0xa2, -- 0xaf, 0x75, 0x88, 0xde, 0xbe, 0xc3, 0x9d, 0xc7, 0x0b, 0xc6, 0x4d, 0xbe, 0x4c, 0xa3, 0xa9, 0x5c, -- 0x0c, 0xa4, 0xa2, 0xc2, 0x6f, 0x30, 0xf0, 0x9f, 0x3e, 0x93, 0x7d, 0x6b, 0x5a, 0x7e, 0x2e, 0xd8, -- 0xc0, 0x0e, 0x92, 0x56, 0xdc, 0xcb, 0x0d, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3a, 0xab, 0xcc, -- 0x6b, 0xc7, 0x01, 0x00, 0x00, --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto -deleted file mode 100644 -index df425d5b5de84..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto -+++ /dev/null -@@ -1,13 +0,0 @@ --syntax = ""proto3""; --package zipkintracer_go.wire; --option go_package = ""github.com/openzipkin/zipkin-go-opentracing/wire""; -- --message TracerState { -- fixed64 trace_id = 1; -- fixed64 span_id = 2; -- bool sampled = 3; -- map baggage_items = 4; -- fixed64 trace_id_high = 20; -- fixed64 parent_span_id = 21; -- fixed64 flags = 22; --} -diff --git 
a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go -deleted file mode 100644 -index e06ca4cbcc95f..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go -+++ /dev/null -@@ -1,72 +0,0 @@ --package zipkintracer -- --import ( -- ""encoding/binary"" -- ""net"" -- ""strconv"" -- ""strings"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --// makeEndpoint takes the hostport and service name that represent this Zipkin --// service, and returns an endpoint that's embedded into the Zipkin core Span --// type. It will return a nil endpoint if the input parameters are malformed. --func makeEndpoint(hostport, serviceName string) (ep *zipkincore.Endpoint) { -- ep = zipkincore.NewEndpoint() -- -- // Set the ServiceName -- ep.ServiceName = serviceName -- -- if strings.IndexByte(hostport, ':') < 0 { -- // """" becomes "":0"" -- hostport = hostport + "":0"" -- } -- -- // try to parse provided "":"" -- host, port, err := net.SplitHostPort(hostport) -- if err != nil { -- // if unparsable, return as ""undefined:0"" -- return -- } -- -- // try to set port number -- p, _ := strconv.ParseUint(port, 10, 16) -- ep.Port = int16(p) -- -- // if is a domain name, look it up -- addrs, err := net.LookupIP(host) -- if err != nil { -- // return as ""undefined:"" -- return -- } -- -- var addr4, addr16 net.IP -- for i := range addrs { -- addr := addrs[i].To4() -- if addr == nil { -- // IPv6 -- if addr16 == nil { -- addr16 = addrs[i].To16() // IPv6 - 16 bytes -- } -- } else { -- // IPv4 -- if addr4 == nil { -- addr4 = addr // IPv4 - 4 bytes -- } -- } -- if addr16 != nil && addr4 != nil { -- // IPv4 & IPv6 have been set, we can stop looking further -- break -- } -- } -- // default to 0 filled 4 byte array for IPv4 if IPv6 only host was found -- if addr4 == nil { -- addr4 = make([]byte, 4) -- } -- -- // set IPv4 and IPv6 addresses -- ep.Ipv4 = (int32)(binary.BigEndian.Uint32(addr4)) -- ep.Ipv6 = []byte(addr16) -- return --} -diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go -deleted file mode 100644 -index 42600a99a9e70..0000000000000 ---- a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go -+++ /dev/null -@@ -1,218 +0,0 @@ --package zipkintracer -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""net"" -- ""strconv"" -- ""time"" -- -- otext ""github.com/opentracing/opentracing-go/ext"" -- ""github.com/opentracing/opentracing-go/log"" -- -- ""github.com/openzipkin/zipkin-go-opentracing/flag"" -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --var ( -- // SpanKindResource will be regarded as a SA annotation by Zipkin. -- SpanKindResource = otext.SpanKindEnum(""resource"") --) -- --// Recorder implements the SpanRecorder interface. --type Recorder struct { -- collector Collector -- debug bool -- endpoint *zipkincore.Endpoint -- materializer func(logFields []log.Field) ([]byte, error) --} -- --// RecorderOption allows for functional options. --type RecorderOption func(r *Recorder) -- --// WithLogFmtMaterializer will convert OpenTracing Log fields to a LogFmt representation. --func WithLogFmtMaterializer() RecorderOption { -- return func(r *Recorder) { -- r.materializer = MaterializeWithLogFmt -- } --} -- --// WithJSONMaterializer will convert OpenTracing Log fields to a JSON representation. 
--func WithJSONMaterializer() RecorderOption { -- return func(r *Recorder) { -- r.materializer = MaterializeWithJSON -- } --} -- --// WithStrictMaterializer will only record event Log fields and discard the rest. --func WithStrictMaterializer() RecorderOption { -- return func(r *Recorder) { -- r.materializer = StrictZipkinMaterializer -- } --} -- --// NewRecorder creates a new Zipkin Recorder backed by the provided Collector. --// --// hostPort and serviceName allow you to set the default Zipkin endpoint --// information which will be added to the application's standard core --// annotations. hostPort will be resolved into an IPv4 and/or IPv6 address and --// Port number, serviceName will be used as the application's service --// identifier. --// --// If application does not listen for incoming requests or an endpoint Context --// does not involve network address and/or port these cases can be solved like --// this: --// # port is not applicable: --// NewRecorder(c, debug, ""192.168.1.12:0"", ""ServiceA"") --// --// # network address and port are not applicable: --// NewRecorder(c, debug, ""0.0.0.0:0"", ""ServiceB"") --func NewRecorder(c Collector, debug bool, hostPort, serviceName string, options ...RecorderOption) SpanRecorder { -- r := &Recorder{ -- collector: c, -- debug: debug, -- endpoint: makeEndpoint(hostPort, serviceName), -- materializer: MaterializeWithLogFmt, -- } -- for _, opts := range options { -- opts(r) -- } -- return r --} -- --// RecordSpan converts a RawSpan into the Zipkin representation of a span --// and records it to the underlying collector. --func (r *Recorder) RecordSpan(sp RawSpan) { -- if !sp.Context.Sampled { -- return -- } -- -- var parentSpanID *int64 -- if sp.Context.ParentSpanID != nil { -- id := int64(*sp.Context.ParentSpanID) -- parentSpanID = &id -- } -- -- var traceIDHigh *int64 -- if sp.Context.TraceID.High > 0 { -- tidh := int64(sp.Context.TraceID.High) -- traceIDHigh = &tidh -- } -- -- span := &zipkincore.Span{ -- Name: sp.Operation, -- ID: int64(sp.Context.SpanID), -- TraceID: int64(sp.Context.TraceID.Low), -- TraceIDHigh: traceIDHigh, -- ParentID: parentSpanID, -- Debug: r.debug || (sp.Context.Flags&flag.Debug == flag.Debug), -- } -- // only send timestamp and duration if this process owns the current span. -- if sp.Context.Owner { -- timestamp := sp.Start.UnixNano() / 1e3 -- duration := sp.Duration.Nanoseconds() / 1e3 -- // since we always time our spans we will round up to 1 microsecond if the -- // span took less. 
-- if duration == 0 { -- duration = 1 -- } -- span.Timestamp = ×tamp -- span.Duration = &duration -- } -- if kind, ok := sp.Tags[string(otext.SpanKind)]; ok { -- switch kind { -- case otext.SpanKindRPCClient, otext.SpanKindRPCClientEnum: -- annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint) -- annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint) -- case otext.SpanKindRPCServer, otext.SpanKindRPCServerEnum: -- annotate(span, sp.Start, zipkincore.SERVER_RECV, r.endpoint) -- annotate(span, sp.Start.Add(sp.Duration), zipkincore.SERVER_SEND, r.endpoint) -- case SpanKindResource: -- serviceName, ok := sp.Tags[string(otext.PeerService)] -- if !ok { -- serviceName = r.endpoint.GetServiceName() -- } -- host, ok := sp.Tags[string(otext.PeerHostname)].(string) -- if !ok { -- if r.endpoint.GetIpv4() > 0 { -- ip := make([]byte, 4) -- binary.BigEndian.PutUint32(ip, uint32(r.endpoint.GetIpv4())) -- host = net.IP(ip).To4().String() -- } else { -- ip := r.endpoint.GetIpv6() -- host = net.IP(ip).String() -- } -- } -- var sPort string -- port, ok := sp.Tags[string(otext.PeerPort)] -- if !ok { -- sPort = strconv.FormatInt(int64(r.endpoint.GetPort()), 10) -- } else { -- sPort = strconv.FormatInt(int64(port.(uint16)), 10) -- } -- re := makeEndpoint(net.JoinHostPort(host, sPort), serviceName.(string)) -- if re != nil { -- annotateBinary(span, zipkincore.SERVER_ADDR, serviceName, re) -- } else { -- fmt.Printf(""endpoint creation failed: host: %q port: %q"", host, sPort) -- } -- annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint) -- annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint) -- default: -- annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint) -- } -- delete(sp.Tags, string(otext.SpanKind)) -- } else { -- annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint) -- } -- -- for key, value := range sp.Tags { -- annotateBinary(span, key, value, r.endpoint) -- } -- -- for _, spLog := range sp.Logs { -- if len(spLog.Fields) == 1 && spLog.Fields[0].Key() == ""event"" { -- // proper Zipkin annotation -- annotate(span, spLog.Timestamp, fmt.Sprintf(""%+v"", spLog.Fields[0].Value()), r.endpoint) -- continue -- } -- // OpenTracing Log with key-value pair(s). Try to materialize using the -- // materializer chosen for the recorder. -- if logs, err := r.materializer(spLog.Fields); err != nil { -- fmt.Printf(""Materialization of OpenTracing LogFields failed: %+v"", err) -- } else { -- annotate(span, spLog.Timestamp, string(logs), r.endpoint) -- } -- } -- _ = r.collector.Collect(span) --} -- --// annotate annotates the span with the given value. --func annotate(span *zipkincore.Span, timestamp time.Time, value string, host *zipkincore.Endpoint) { -- if timestamp.IsZero() { -- timestamp = time.Now() -- } -- span.Annotations = append(span.Annotations, &zipkincore.Annotation{ -- Timestamp: timestamp.UnixNano() / 1e3, -- Value: value, -- Host: host, -- }) --} -- --// annotateBinary annotates the span with a key and a value that will be []byte --// encoded. 
--func annotateBinary(span *zipkincore.Span, key string, value interface{}, host *zipkincore.Endpoint) { -- if b, ok := value.(bool); ok { -- if b { -- value = ""true"" -- } else { -- value = ""false"" -- } -- } -- span.BinaryAnnotations = append(span.BinaryAnnotations, &zipkincore.BinaryAnnotation{ -- Key: key, -- Value: []byte(fmt.Sprintf(""%+v"", value)), -- AnnotationType: zipkincore.AnnotationType_STRING, -- Host: host, -- }) --} -diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore -deleted file mode 100644 -index c2bb6e4af122a..0000000000000 ---- a/vendor/github.com/pierrec/lz4/.gitignore -+++ /dev/null -@@ -1,31 +0,0 @@ --# Created by https://www.gitignore.io/api/macos -- --### macOS ### --*.DS_Store --.AppleDouble --.LSOverride -- --# Icon must end with two \r --Icon -- -- --# Thumbnails --._* -- --# Files that might appear in the root of a volume --.DocumentRevisions-V100 --.fseventsd --.Spotlight-V100 --.TemporaryItems --.Trashes --.VolumeIcon.icns --.com.apple.timemachine.donotpresent -- --# Directories potentially created on remote AFP share --.AppleDB --.AppleDesktop --Network Trash Folder --Temporary Items --.apdisk -- --# End of https://www.gitignore.io/api/macos -diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml -deleted file mode 100644 -index 78be21cc822dc..0000000000000 ---- a/vendor/github.com/pierrec/lz4/.travis.yml -+++ /dev/null -@@ -1,8 +0,0 @@ --language: go -- --go: -- - 1.x -- --script: -- - go test -v -cpu=2 -- - go test -v -cpu=2 -race -\ No newline at end of file -diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE -deleted file mode 100644 -index bd899d8353dd5..0000000000000 ---- a/vendor/github.com/pierrec/lz4/LICENSE -+++ /dev/null -@@ -1,28 +0,0 @@ --Copyright (c) 2015, Pierre Curto --All rights reserved. -- --Redistribution and use in source and binary forms, with or without --modification, are permitted provided that the following conditions are met: -- --* Redistributions of source code must retain the above copyright notice, this -- list of conditions and the following disclaimer. -- --* Redistributions in binary form must reproduce the above copyright notice, -- this list of conditions and the following disclaimer in the documentation -- and/or other materials provided with the distribution. -- --* Neither the name of xxHash nor the names of its -- contributors may be used to endorse or promote products derived from -- this software without specific prior written permission. -- --THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS"" --AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE --IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE --FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL --DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER --CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, --OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE --OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-- -diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md -deleted file mode 100644 -index dd3c9d47e18bd..0000000000000 ---- a/vendor/github.com/pierrec/lz4/README.md -+++ /dev/null -@@ -1,31 +0,0 @@ --[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) --[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) -- --# lz4 --LZ4 compression and decompression in pure Go -- --## Usage -- --```go --import ""github.com/pierrec/lz4"" --``` -- --## Description -- --Package lz4 implements reading and writing lz4 compressed data (a frame), --as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, --using an io.Reader (decompression) and io.Writer (compression). --It is designed to minimize memory usage while maximizing throughput by being able to --[de]compress data concurrently. -- --The Reader and the Writer support concurrent processing provided the supplied buffers are --large enough (in multiples of BlockMaxSize) and there is no block dependency. --Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. --The runtime.GOMAXPROCS() value is used to apply concurrency or not. -- --Although the block level compression and decompression functions are exposed and are fully compatible --with the lz4 block format definition, they are low level and should not be used directly. --For a complete description of an lz4 compressed block, see: --http://fastcompression.blogspot.fr/2011/05/lz4-explained.html -- --See https://github.com/Cyan4973/lz4 for the reference C implementation. -diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go -deleted file mode 100644 -index 44e3eaaac798f..0000000000000 ---- a/vendor/github.com/pierrec/lz4/block.go -+++ /dev/null -@@ -1,454 +0,0 @@ --package lz4 -- --import ( -- ""encoding/binary"" -- ""errors"" --) -- --// block represents a frame data block. --// Used when compressing or decompressing frame blocks concurrently. --type block struct { -- compressed bool -- zdata []byte // compressed data -- data []byte // decompressed data -- offset int // offset within the data as with block dependency the 64Kb window is prepended to it -- checksum uint32 // compressed data checksum -- err error // error while [de]compressing --} -- --var ( -- // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted. -- ErrInvalidSource = errors.New(""lz4: invalid source"") -- // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when -- // the supplied buffer for [de]compression is too small. -- ErrShortBuffer = errors.New(""lz4: short buffer"") --) -- --// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. --func CompressBlockBound(n int) int { -- return n + n/255 + 16 --} -- --// UncompressBlock decompresses the source buffer into the destination one, --// starting at the di index and returning the decompressed size. --// --// The destination buffer must be sized appropriately. --// --// An error is returned if the source data is invalid or the destination buffer is too small. 
--func UncompressBlock(src, dst []byte, di int) (int, error) { -- si, sn, di0 := 0, len(src), di -- if sn == 0 { -- return 0, nil -- } -- -- for { -- // literals and match lengths (token) -- lLen := int(src[si] >> 4) -- mLen := int(src[si] & 0xF) -- if si++; si == sn { -- return di, ErrInvalidSource -- } -- -- // literals -- if lLen > 0 { -- if lLen == 0xF { -- for src[si] == 0xFF { -- lLen += 0xFF -- if si++; si == sn { -- return di - di0, ErrInvalidSource -- } -- } -- lLen += int(src[si]) -- if si++; si == sn { -- return di - di0, ErrInvalidSource -- } -- } -- if len(dst)-di < lLen || si+lLen > sn { -- return di - di0, ErrShortBuffer -- } -- di += copy(dst[di:], src[si:si+lLen]) -- -- if si += lLen; si >= sn { -- return di - di0, nil -- } -- } -- -- if si += 2; si >= sn { -- return di, ErrInvalidSource -- } -- offset := int(src[si-2]) | int(src[si-1])<<8 -- if di-offset < 0 || offset == 0 { -- return di - di0, ErrInvalidSource -- } -- -- // match -- if mLen == 0xF { -- for src[si] == 0xFF { -- mLen += 0xFF -- if si++; si == sn { -- return di - di0, ErrInvalidSource -- } -- } -- mLen += int(src[si]) -- if si++; si == sn { -- return di - di0, ErrInvalidSource -- } -- } -- // minimum match length is 4 -- mLen += 4 -- if len(dst)-di <= mLen { -- return di - di0, ErrShortBuffer -- } -- -- // copy the match (NB. match is at least 4 bytes long) -- if mLen >= offset { -- bytesToCopy := offset * (mLen / offset) -- // Efficiently copy the match dst[di-offset:di] into the slice -- // dst[di:di+bytesToCopy] -- expanded := dst[di-offset : di+bytesToCopy] -- n := offset -- for n <= bytesToCopy+offset { -- copy(expanded[n:], expanded[:n]) -- n *= 2 -- } -- di += bytesToCopy -- mLen -= bytesToCopy -- } -- -- di += copy(dst[di:], dst[di-offset:di-offset+mLen]) -- } --} -- --// CompressBlock compresses the source buffer starting at soffet into the destination one. --// This is the fast version of LZ4 compression and also the default one. --// --// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. --// --// An error is returned if the destination buffer is too small. --func CompressBlock(src, dst []byte, soffset int) (int, error) { -- sn, dn := len(src)-mfLimit, len(dst) -- if sn <= 0 || dn == 0 || soffset >= sn { -- return 0, nil -- } -- var si, di int -- -- // fast scan strategy: -- // we only need a hash table to store the last sequences (4 bytes) -- var hashTable [1 << hashLog]int -- var hashShift = uint((minMatch * 8) - hashLog) -- -- // Initialise the hash table with the first 64Kb of the input buffer -- // (used when compressing dependent blocks) -- for si < soffset { -- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift -- si++ -- hashTable[h] = si -- } -- -- anchor := si -- fma := 1 << skipStrength -- for si < sn-minMatch { -- // hash the next 4 bytes (sequence)... 
-- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift -- // -1 to separate existing entries from new ones -- ref := hashTable[h] - 1 -- // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving) -- hashTable[h] = si + 1 -- // no need to check the last 3 bytes in the first literal 4 bytes as -- // this guarantees that the next match, if any, is compressed with -- // a lower size, since to have some compression we must have: -- // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size) -- // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap -- // and by definition we do have: -- // ll >= 1, ml >= 4 -- // => ll+ml >= 5 -- // => so overlap must be 0 -- -- // the sequence is new, out of bound (64kb) or not valid: try next sequence -- if ref < 0 || fma&(1<>winSizeLog > 0 || -- src[ref] != src[si] || -- src[ref+1] != src[si+1] || -- src[ref+2] != src[si+2] || -- src[ref+3] != src[si+3] { -- // variable step: improves performance on non-compressible data -- si += fma >> skipStrength -- fma++ -- continue -- } -- // match found -- fma = 1 << skipStrength -- lLen := si - anchor -- offset := si - ref -- -- // encode match length part 1 -- si += minMatch -- mLen := si // match length has minMatch already -- for si <= sn && src[si] == src[si-offset] { -- si++ -- } -- mLen = si - mLen -- if mLen < 0xF { -- dst[di] = byte(mLen) -- } else { -- dst[di] = 0xF -- } -- -- // encode literals length -- if lLen < 0xF { -- dst[di] |= byte(lLen << 4) -- } else { -- dst[di] |= 0xF0 -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- l := lLen - 0xF -- for ; l >= 0xFF; l -= 0xFF { -- dst[di] = 0xFF -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- dst[di] = byte(l) -- } -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- -- // literals -- if di+lLen >= dn { -- return di, ErrShortBuffer -- } -- di += copy(dst[di:], src[anchor:anchor+lLen]) -- anchor = si -- -- // encode offset -- if di += 2; di >= dn { -- return di, ErrShortBuffer -- } -- dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) -- -- // encode match length part 2 -- if mLen >= 0xF { -- for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { -- dst[di] = 0xFF -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- dst[di] = byte(mLen) -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- } -- -- if anchor == 0 { -- // incompressible -- return 0, nil -- } -- -- // last literals -- lLen := len(src) - anchor -- if lLen < 0xF { -- dst[di] = byte(lLen << 4) -- } else { -- dst[di] = 0xF0 -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- lLen -= 0xF -- for ; lLen >= 0xFF; lLen -= 0xFF { -- dst[di] = 0xFF -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- dst[di] = byte(lLen) -- } -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- -- // write literals -- src = src[anchor:] -- switch n := di + len(src); { -- case n > dn: -- return di, ErrShortBuffer -- case n >= sn: -- // incompressible -- return 0, nil -- } -- di += copy(dst[di:], src) -- return di, nil --} -- --// CompressBlockHC compresses the source buffer starting at soffet into the destination one. --// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. --// --// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. --// --// An error is returned if the destination buffer is too small. 
--func CompressBlockHC(src, dst []byte, soffset int) (int, error) { -- sn, dn := len(src)-mfLimit, len(dst) -- if sn <= 0 || dn == 0 || soffset >= sn { -- return 0, nil -- } -- var si, di int -- -- // Hash Chain strategy: -- // we need a hash table and a chain table -- // the chain table cannot contain more entries than the window size (64Kb entries) -- var hashTable [1 << hashLog]int -- var chainTable [winSize]int -- var hashShift = uint((minMatch * 8) - hashLog) -- -- // Initialise the hash table with the first 64Kb of the input buffer -- // (used when compressing dependent blocks) -- for si < soffset { -- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift -- chainTable[si&winMask] = hashTable[h] -- si++ -- hashTable[h] = si -- } -- -- anchor := si -- for si < sn-minMatch { -- // hash the next 4 bytes (sequence)... -- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift -- -- // follow the chain until out of window and give the longest match -- mLen := 0 -- offset := 0 -- for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 { -- // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length -- if src[next+mLen] == src[si+mLen] { -- for ml := 0; ; ml++ { -- if src[next+ml] != src[si+ml] || si+ml > sn { -- // found a longer match, keep its position and length -- if mLen < ml && ml >= minMatch { -- mLen = ml -- offset = si - next -- } -- break -- } -- } -- } -- } -- chainTable[si&winMask] = hashTable[h] -- hashTable[h] = si + 1 -- -- // no match found -- if mLen == 0 { -- si++ -- continue -- } -- -- // match found -- // update hash/chain tables with overlaping bytes: -- // si already hashed, add everything from si+1 up to the match length -- for si, ml := si+1, si+mLen; si < ml; { -- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift -- chainTable[si&winMask] = hashTable[h] -- si++ -- hashTable[h] = si -- } -- -- lLen := si - anchor -- si += mLen -- mLen -= minMatch // match length does not include minMatch -- -- if mLen < 0xF { -- dst[di] = byte(mLen) -- } else { -- dst[di] = 0xF -- } -- -- // encode literals length -- if lLen < 0xF { -- dst[di] |= byte(lLen << 4) -- } else { -- dst[di] |= 0xF0 -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- l := lLen - 0xF -- for ; l >= 0xFF; l -= 0xFF { -- dst[di] = 0xFF -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- dst[di] = byte(l) -- } -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- -- // literals -- if di+lLen >= dn { -- return di, ErrShortBuffer -- } -- di += copy(dst[di:], src[anchor:anchor+lLen]) -- anchor = si -- -- // encode offset -- if di += 2; di >= dn { -- return di, ErrShortBuffer -- } -- dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) -- -- // encode match length part 2 -- if mLen >= 0xF { -- for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { -- dst[di] = 0xFF -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- dst[di] = byte(mLen) -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- } -- -- if anchor == 0 { -- // incompressible -- return 0, nil -- } -- -- // last literals -- lLen := len(src) - anchor -- if lLen < 0xF { -- dst[di] = byte(lLen << 4) -- } else { -- dst[di] = 0xF0 -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- lLen -= 0xF -- for ; lLen >= 0xFF; lLen -= 0xFF { -- dst[di] = 0xFF -- if di++; di == dn { -- return di, ErrShortBuffer -- } -- } -- dst[di] = byte(lLen) -- } -- if di++; di == dn { -- return di, 
ErrShortBuffer -- } -- -- // write literals -- src = src[anchor:] -- switch n := di + len(src); { -- case n > dn: -- return di, ErrShortBuffer -- case n >= sn: -- // incompressible -- return 0, nil -- } -- di += copy(dst[di:], src) -- return di, nil --} -diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go -deleted file mode 100644 -index ddb82f66f8dd3..0000000000000 ---- a/vendor/github.com/pierrec/lz4/lz4.go -+++ /dev/null -@@ -1,105 +0,0 @@ --// Package lz4 implements reading and writing lz4 compressed data (a frame), --// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, --// using an io.Reader (decompression) and io.Writer (compression). --// It is designed to minimize memory usage while maximizing throughput by being able to --// [de]compress data concurrently. --// --// The Reader and the Writer support concurrent processing provided the supplied buffers are --// large enough (in multiples of BlockMaxSize) and there is no block dependency. --// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. --// The runtime.GOMAXPROCS() value is used to apply concurrency or not. --// --// Although the block level compression and decompression functions are exposed and are fully compatible --// with the lz4 block format definition, they are low level and should not be used directly. --// For a complete description of an lz4 compressed block, see: --// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html --// --// See https://github.com/Cyan4973/lz4 for the reference C implementation. --package lz4 -- --import ( -- ""hash"" -- ""sync"" -- -- ""github.com/pierrec/xxHash/xxHash32"" --) -- --const ( -- // Extension is the LZ4 frame file name extension -- Extension = "".lz4"" -- // Version is the LZ4 frame format version -- Version = 1 -- -- frameMagic = uint32(0x184D2204) -- frameSkipMagic = uint32(0x184D2A50) -- -- // The following constants are used to setup the compression algorithm. -- minMatch = 4 // the minimum size of the match sequence size (4 bytes) -- winSizeLog = 16 // LZ4 64Kb window size limit -- winSize = 1 << winSizeLog -- winMask = winSize - 1 // 64Kb window of previous data for dependent blocks -- -- // hashLog determines the size of the hash table used to quickly find a previous match position. -- // Its value influences the compression speed and memory usage, the lower the faster, -- // but at the expense of the compression ratio. -- // 16 seems to be the best compromise. -- hashLog = 16 -- hashTableSize = 1 << hashLog -- hashShift = uint((minMatch * 8) - hashLog) -- -- mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. -- skipStrength = 6 // variable step for fast scan -- -- hasher = uint32(2654435761) // prime number used to hash minMatch --) -- --// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. --var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} --var bsMapValue = map[int]byte{} -- --// Reversed. --func init() { -- for i, v := range bsMapID { -- bsMapValue[v] = i -- } --} -- --// Header describes the various flags that can be set on a Writer or obtained from a Reader. --// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). --// --// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. 
--// It is the caller responsibility to check them if necessary (typically when using the Reader concurrency). --type Header struct { -- BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one) -- BlockChecksum bool // compressed blocks are checksumed -- NoChecksum bool // frame checksum -- BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. -- Size uint64 // the frame total size. It is _not_ computed by the Writer. -- HighCompression bool // use high compression (only for the Writer) -- done bool // whether the descriptor was processed (Read or Write and checked) -- // Removed as not supported -- // Dict bool // a dictionary id is to be used -- // DictID uint32 // the dictionary id read from the frame, if any. --} -- --// xxhPool wraps the standard pool for xxHash items. --// Putting items back in the pool automatically resets them. --type xxhPool struct { -- sync.Pool --} -- --func (p *xxhPool) Get() hash.Hash32 { -- return p.Pool.Get().(hash.Hash32) --} -- --func (p *xxhPool) Put(h hash.Hash32) { -- h.Reset() -- p.Pool.Put(h) --} -- --// hashPool is used by readers and writers and contains xxHash items. --var hashPool = xxhPool{ -- Pool: sync.Pool{ -- New: func() interface{} { return xxHash32.New(0) }, -- }, --} -diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go -deleted file mode 100644 -index 9f7fd60424f06..0000000000000 ---- a/vendor/github.com/pierrec/lz4/reader.go -+++ /dev/null -@@ -1,364 +0,0 @@ --package lz4 -- --import ( -- ""encoding/binary"" -- ""errors"" -- ""fmt"" -- ""hash"" -- ""io"" -- ""io/ioutil"" -- ""runtime"" -- ""sync"" -- ""sync/atomic"" --) -- --// ErrInvalid is returned when the data being read is not an LZ4 archive --// (LZ4 magic number detection failed). --var ErrInvalid = errors.New(""invalid lz4 data"") -- --// errEndOfBlock is returned by readBlock when it has reached the last block of the frame. --// It is not an error. --var errEndOfBlock = errors.New(""end of block"") -- --// Reader implements the LZ4 frame decoder. --// The Header is set after the first call to Read(). --// The Header may change between Read() calls in case of concatenated frames. --type Reader struct { -- Pos int64 // position within the source -- Header -- src io.Reader -- checksum hash.Hash32 // frame hash -- wg sync.WaitGroup // decompressing go routine wait group -- data []byte // buffered decompressed data -- window []byte // 64Kb decompressed data window --} -- --// NewReader returns a new LZ4 frame decoder. --// No access to the underlying io.Reader is performed. --func NewReader(src io.Reader) *Reader { -- return &Reader{ -- src: src, -- checksum: hashPool.Get(), -- } --} -- --// readHeader checks the frame magic number and parses the frame descriptoz. --// Skippable frames are supported even as a first frame although the LZ4 --// specifications recommends skippable frames not to be used as first frames. 
--func (z *Reader) readHeader(first bool) error { -- defer z.checksum.Reset() -- -- for { -- var magic uint32 -- if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil { -- if !first && err == io.ErrUnexpectedEOF { -- return io.EOF -- } -- return err -- } -- z.Pos += 4 -- if magic>>8 == frameSkipMagic>>8 { -- var skipSize uint32 -- if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil { -- return err -- } -- z.Pos += 4 -- m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) -- z.Pos += m -- if err != nil { -- return err -- } -- continue -- } -- if magic != frameMagic { -- return ErrInvalid -- } -- break -- } -- -- // header -- var buf [8]byte -- if _, err := io.ReadFull(z.src, buf[:2]); err != nil { -- return err -- } -- z.Pos += 2 -- -- b := buf[0] -- if b>>6 != Version { -- return fmt.Errorf(""lz4.Read: invalid version: got %d expected %d"", b>>6, Version) -- } -- z.BlockDependency = b>>5&1 == 0 -- z.BlockChecksum = b>>4&1 > 0 -- frameSize := b>>3&1 > 0 -- z.NoChecksum = b>>2&1 == 0 -- // z.Dict = b&1 > 0 -- -- bmsID := buf[1] >> 4 & 0x7 -- bSize, ok := bsMapID[bmsID] -- if !ok { -- return fmt.Errorf(""lz4.Read: invalid block max size: %d"", bmsID) -- } -- z.BlockMaxSize = bSize -- -- z.checksum.Write(buf[0:2]) -- -- if frameSize { -- if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil { -- return err -- } -- z.Pos += 8 -- binary.LittleEndian.PutUint64(buf[:], z.Size) -- z.checksum.Write(buf[0:8]) -- } -- -- // if z.Dict { -- // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil { -- // return err -- // } -- // z.Pos += 4 -- // binary.LittleEndian.PutUint32(buf[:], z.DictID) -- // z.checksum.Write(buf[0:4]) -- // } -- -- // header checksum -- if _, err := io.ReadFull(z.src, buf[:1]); err != nil { -- return err -- } -- z.Pos++ -- if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { -- return fmt.Errorf(""lz4.Read: invalid header checksum: got %v expected %v"", buf[0], h) -- } -- -- z.Header.done = true -- -- return nil --} -- --// Read decompresses data from the underlying source into the supplied buffer. --// --// Since there can be multiple streams concatenated, Header values may --// change between calls to Read(). If that is the case, no data is actually read from --// the underlying io.Reader, to allow for potential input buffer resizing. --// --// Data is buffered if the input buffer is too small, and exhausted upon successive calls. --// --// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is --// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value. --func (z *Reader) Read(buf []byte) (n int, err error) { -- if !z.Header.done { -- if err = z.readHeader(true); err != nil { -- return -- } -- } -- -- if len(buf) == 0 { -- return -- } -- -- // exhaust remaining data from previous Read() -- if len(z.data) > 0 { -- n = copy(buf, z.data) -- z.data = z.data[n:] -- if len(z.data) == 0 { -- z.data = nil -- } -- return -- } -- -- // Break up the input buffer into BlockMaxSize blocks with at least one block. -- // Then decompress into each of them concurrently if possible (no dependency). -- // In case of dependency, the first block will be missing the window (except on the -- // very first call), the rest will have it already since it comes from the previous block. 
-- wbuf := buf -- zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize -- zblocks := make([]block, zn) -- for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ { -- zb := &zblocks[zi] -- // last block may be too small -- if len(wbuf) < z.BlockMaxSize+len(z.window) { -- wbuf = make([]byte, z.BlockMaxSize+len(z.window)) -- } -- copy(wbuf, z.window) -- if zb.err = z.readBlock(wbuf, zb); zb.err != nil { -- break -- } -- wbuf = wbuf[z.BlockMaxSize:] -- if !z.BlockDependency { -- z.wg.Add(1) -- go z.decompressBlock(zb, &abort) -- continue -- } -- // cannot decompress concurrently when dealing with block dependency -- z.decompressBlock(zb, nil) -- // the last block may not contain enough data -- if len(z.window) == 0 { -- z.window = make([]byte, winSize) -- } -- if len(zb.data) >= winSize { -- copy(z.window, zb.data[len(zb.data)-winSize:]) -- } else { -- copy(z.window, z.window[len(zb.data):]) -- copy(z.window[len(zb.data)+1:], zb.data) -- } -- } -- z.wg.Wait() -- -- // since a block size may be less then BlockMaxSize, trim the decompressed buffers -- for _, zb := range zblocks { -- if zb.err != nil { -- if zb.err == errEndOfBlock { -- return n, z.close() -- } -- return n, zb.err -- } -- bLen := len(zb.data) -- if !z.NoChecksum { -- z.checksum.Write(zb.data) -- } -- m := copy(buf[n:], zb.data) -- // buffer the remaining data (this is necessarily the last block) -- if m < bLen { -- z.data = zb.data[m:] -- } -- n += m -- } -- -- return --} -- --// readBlock reads an entire frame block from the frame. --// The input buffer is the one that will receive the decompressed data. --// If the end of the frame is detected, it returns the errEndOfBlock error. --func (z *Reader) readBlock(buf []byte, b *block) error { -- var bLen uint32 -- if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil { -- return err -- } -- atomic.AddInt64(&z.Pos, 4) -- -- switch { -- case bLen == 0: -- return errEndOfBlock -- case bLen&(1<<31) == 0: -- b.compressed = true -- b.data = buf -- b.zdata = make([]byte, bLen) -- default: -- bLen = bLen & (1<<31 - 1) -- if int(bLen) > len(buf) { -- return fmt.Errorf(""lz4.Read: invalid block size: %d"", bLen) -- } -- b.data = buf[:bLen] -- b.zdata = buf[:bLen] -- } -- if _, err := io.ReadFull(z.src, b.zdata); err != nil { -- return err -- } -- -- if z.BlockChecksum { -- if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil { -- return err -- } -- xxh := hashPool.Get() -- defer hashPool.Put(xxh) -- xxh.Write(b.zdata) -- if h := xxh.Sum32(); h != b.checksum { -- return fmt.Errorf(""lz4.Read: invalid block checksum: got %x expected %x"", h, b.checksum) -- } -- } -- -- return nil --} -- --// decompressBlock decompresses a frame block. --// In case of an error, the block err is set with it and abort is set to 1. --func (z *Reader) decompressBlock(b *block, abort *uint32) { -- if abort != nil { -- defer z.wg.Done() -- } -- if b.compressed { -- n := len(z.window) -- m, err := UncompressBlock(b.zdata, b.data, n) -- if err != nil { -- if abort != nil { -- atomic.StoreUint32(abort, 1) -- } -- b.err = err -- return -- } -- b.data = b.data[n : n+m] -- } -- atomic.AddInt64(&z.Pos, int64(len(b.data))) --} -- --// close validates the frame checksum (if any) and checks the next frame (if any). 
--func (z *Reader) close() error { -- if !z.NoChecksum { -- var checksum uint32 -- if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil { -- return err -- } -- if checksum != z.checksum.Sum32() { -- return fmt.Errorf(""lz4.Read: invalid frame checksum: got %x expected %x"", z.checksum.Sum32(), checksum) -- } -- } -- -- // get ready for the next concatenated frame, but do not change the position -- pos := z.Pos -- z.Reset(z.src) -- z.Pos = pos -- -- // since multiple frames can be concatenated, check for another one -- return z.readHeader(false) --} -- --// Reset discards the Reader's state and makes it equivalent to the --// result of its original state from NewReader, but reading from r instead. --// This permits reusing a Reader rather than allocating a new one. --func (z *Reader) Reset(r io.Reader) { -- z.Header = Header{} -- z.Pos = 0 -- z.src = r -- z.checksum.Reset() -- z.data = nil -- z.window = nil --} -- --// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer. --// Returns the number of bytes written. --func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { -- cpus := runtime.GOMAXPROCS(0) -- var buf []byte -- -- // The initial buffer being nil, the first Read will be only read the compressed frame options. -- // The buffer can then be sized appropriately to support maximum concurrency decompression. -- // If multiple frames are concatenated, Read() will return with no data decompressed but with -- // potentially changed options. The buffer will be resized accordingly, always trying to -- // maximize concurrency. -- for { -- nsize := 0 -- // the block max size can change if multiple streams are concatenated. -- // Check it after every Read(). -- if z.BlockDependency { -- // in case of dependency, we cannot decompress concurrently, -- // so allocate the minimum buffer + window size -- nsize = len(z.window) + z.BlockMaxSize -- } else { -- // if no dependency, allocate a buffer large enough for concurrent decompression -- nsize = cpus * z.BlockMaxSize -- } -- if nsize != len(buf) { -- buf = make([]byte, nsize) -- } -- -- m, er := z.Read(buf) -- if er != nil && er != io.EOF { -- return n, er -- } -- m, err = w.Write(buf[:m]) -- n += int64(m) -- if err != nil || er == io.EOF { -- return -- } -- } --} -diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go -deleted file mode 100644 -index b1b712fe21516..0000000000000 ---- a/vendor/github.com/pierrec/lz4/writer.go -+++ /dev/null -@@ -1,377 +0,0 @@ --package lz4 -- --import ( -- ""encoding/binary"" -- ""fmt"" -- ""hash"" -- ""io"" -- ""runtime"" --) -- --// Writer implements the LZ4 frame encoder. --type Writer struct { -- Header -- dst io.Writer -- checksum hash.Hash32 // frame checksum -- data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with -- window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer -- -- zbCompressBuf []byte // buffer for compressing lz4 blocks -- writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock --} -- --// NewWriter returns a new LZ4 frame encoder. --// No access to the underlying io.Writer is performed. --// The supplied Header is checked at the first Write. --// It is ok to change it before the first Write but then not until a Reset() is performed. 
--func NewWriter(dst io.Writer) *Writer { -- return &Writer{ -- dst: dst, -- checksum: hashPool.Get(), -- Header: Header{ -- BlockMaxSize: 4 << 20, -- }, -- writeSizeBuf: make([]byte, 4), -- } --} -- --// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. --func (z *Writer) writeHeader() error { -- // Default to 4Mb if BlockMaxSize is not set -- if z.Header.BlockMaxSize == 0 { -- z.Header.BlockMaxSize = 4 << 20 -- } -- // the only option that need to be validated -- bSize, ok := bsMapValue[z.Header.BlockMaxSize] -- if !ok { -- return fmt.Errorf(""lz4: invalid block max size: %d"", z.Header.BlockMaxSize) -- } -- -- // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes -- // Size and DictID are optional -- var buf [19]byte -- -- // set the fixed size data: magic number, block max size and flags -- binary.LittleEndian.PutUint32(buf[0:], frameMagic) -- flg := byte(Version << 6) -- if !z.Header.BlockDependency { -- flg |= 1 << 5 -- } -- if z.Header.BlockChecksum { -- flg |= 1 << 4 -- } -- if z.Header.Size > 0 { -- flg |= 1 << 3 -- } -- if !z.Header.NoChecksum { -- flg |= 1 << 2 -- } -- // if z.Header.Dict { -- // flg |= 1 -- // } -- buf[4] = flg -- buf[5] = bSize << 4 -- -- // current buffer size: magic(4) + flags(1) + block max size (1) -- n := 6 -- // optional items -- if z.Header.Size > 0 { -- binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) -- n += 8 -- } -- // if z.Header.Dict { -- // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID) -- // n += 4 -- // } -- -- // header checksum includes the flags, block max size and optional Size and DictID -- z.checksum.Write(buf[4:n]) -- buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF) -- z.checksum.Reset() -- -- // header ready, write it out -- if _, err := z.dst.Write(buf[0 : n+1]); err != nil { -- return err -- } -- z.Header.done = true -- -- // initialize buffers dependent on header info -- z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize) -- -- return nil --} -- --// Write compresses data from the supplied buffer into the underlying io.Writer. --// Write does not return until the data has been written. --// --// If the input buffer is large enough (typically in multiples of BlockMaxSize) --// the data will be compressed concurrently. --// --// Write never buffers any data unless in BlockDependency mode where it may --// do so until it has 64Kb of data, after which it never buffers any. --func (z *Writer) Write(buf []byte) (n int, err error) { -- if !z.Header.done { -- if err = z.writeHeader(); err != nil { -- return -- } -- } -- -- if len(buf) == 0 { -- return -- } -- -- if !z.NoChecksum { -- z.checksum.Write(buf) -- } -- -- // with block dependency, require at least 64Kb of data to work with -- // not having 64Kb only matters initially to setup the first window -- bl := 0 -- if z.BlockDependency && len(z.window) == 0 { -- bl = len(z.data) -- z.data = append(z.data, buf...) -- if len(z.data) < winSize { -- return len(buf), nil -- } -- buf = z.data -- z.data = nil -- } -- -- // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block. -- // Then compress into each of them concurrently if possible (no dependency). -- var ( -- zb block -- wbuf = buf -- zn = len(wbuf) / z.BlockMaxSize -- zi = 0 -- leftover = len(buf) % z.BlockMaxSize -- ) -- --loop: -- for zi < zn { -- if z.BlockDependency { -- if zi == 0 { -- // first block does not have the window -- zb.data = append(z.window, wbuf[:z.BlockMaxSize]...) 
-- zb.offset = len(z.window) -- wbuf = wbuf[z.BlockMaxSize-winSize:] -- } else { -- // set the uncompressed data including the window from previous block -- zb.data = wbuf[:z.BlockMaxSize+winSize] -- zb.offset = winSize -- wbuf = wbuf[z.BlockMaxSize:] -- } -- } else { -- zb.data = wbuf[:z.BlockMaxSize] -- wbuf = wbuf[z.BlockMaxSize:] -- } -- -- goto write -- } -- -- // left over -- if leftover > 0 { -- zb = block{data: wbuf} -- if z.BlockDependency { -- if zn == 0 { -- zb.data = append(z.window, zb.data...) -- zb.offset = len(z.window) -- } else { -- zb.offset = winSize -- } -- } -- -- leftover = 0 -- goto write -- } -- -- if z.BlockDependency { -- if len(z.window) == 0 { -- z.window = make([]byte, winSize) -- } -- // last buffer may be shorter than the window -- if len(buf) >= winSize { -- copy(z.window, buf[len(buf)-winSize:]) -- } else { -- copy(z.window, z.window[len(buf):]) -- copy(z.window[len(buf)+1:], buf) -- } -- } -- -- return -- --write: -- zb = z.compressBlock(zb) -- _, err = z.writeBlock(zb) -- -- written := len(zb.data) -- if bl > 0 { -- if written >= bl { -- written -= bl -- bl = 0 -- } else { -- bl -= written -- written = 0 -- } -- } -- -- n += written -- // remove the window in zb.data -- if z.BlockDependency { -- if zi == 0 { -- n -= len(z.window) -- } else { -- n -= winSize -- } -- } -- if err != nil { -- return -- } -- zi++ -- goto loop --} -- --// compressBlock compresses a block. --func (z *Writer) compressBlock(zb block) block { -- // compressed block size cannot exceed the input's -- var ( -- n int -- err error -- zbuf = z.zbCompressBuf -- ) -- if z.HighCompression { -- n, err = CompressBlockHC(zb.data, zbuf, zb.offset) -- } else { -- n, err = CompressBlock(zb.data, zbuf, zb.offset) -- } -- -- // compressible and compressed size smaller than decompressed: ok! -- if err == nil && n > 0 && len(zb.zdata) < len(zb.data) { -- zb.compressed = true -- zb.zdata = zbuf[:n] -- } else { -- zb.compressed = false -- zb.zdata = zb.data[zb.offset:] -- } -- -- if z.BlockChecksum { -- xxh := hashPool.Get() -- xxh.Write(zb.zdata) -- zb.checksum = xxh.Sum32() -- hashPool.Put(xxh) -- } -- -- return zb --} -- --// writeBlock writes a frame block to the underlying io.Writer (size, data). --func (z *Writer) writeBlock(zb block) (int, error) { -- bLen := uint32(len(zb.zdata)) -- if !zb.compressed { -- bLen |= 1 << 31 -- } -- -- n := 0 -- -- binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen) -- n, err := z.dst.Write(z.writeSizeBuf) -- if err != nil { -- return n, err -- } -- -- m, err := z.dst.Write(zb.zdata) -- n += m -- if err != nil { -- return n, err -- } -- -- if z.BlockChecksum { -- binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum) -- m, err := z.dst.Write(z.writeSizeBuf) -- n += m -- -- if err != nil { -- return n, err -- } -- } -- -- return n, nil --} -- --// Flush flushes any pending compressed data to the underlying writer. --// Flush does not return until the data has been written. --// If the underlying writer returns an error, Flush returns that error. --// --// Flush is only required when in BlockDependency mode and the total of --// data written is less than 64Kb. --func (z *Writer) Flush() error { -- if len(z.data) == 0 { -- return nil -- } -- -- zb := z.compressBlock(block{data: z.data}) -- if _, err := z.writeBlock(zb); err != nil { -- return err -- } -- return nil --} -- --// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. 
--func (z *Writer) Close() error { -- if !z.Header.done { -- if err := z.writeHeader(); err != nil { -- return err -- } -- } -- -- // buffered data for the block dependency window -- if z.BlockDependency && len(z.data) > 0 { -- zb := block{data: z.data} -- if _, err := z.writeBlock(z.compressBlock(zb)); err != nil { -- return err -- } -- } -- -- if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil { -- return err -- } -- if !z.NoChecksum { -- if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil { -- return err -- } -- } -- return nil --} -- --// Reset clears the state of the Writer z such that it is equivalent to its --// initial state from NewWriter, but instead writing to w. --// No access to the underlying io.Writer is performed. --func (z *Writer) Reset(w io.Writer) { -- z.Header = Header{} -- z.dst = w -- z.checksum.Reset() -- z.data = nil -- z.window = nil --} -- --// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer. --// Returns the number of bytes read. --// It does not close the Writer. --func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) { -- cpus := runtime.GOMAXPROCS(0) -- buf := make([]byte, cpus*z.BlockMaxSize) -- for { -- m, er := io.ReadFull(r, buf) -- n += int64(m) -- if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF { -- if _, err = z.Write(buf[:m]); err != nil { -- return -- } -- if er == nil { -- continue -- } -- return -- } -- return n, er -- } --} -diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE -deleted file mode 100644 -index c1418f3f677a1..0000000000000 ---- a/vendor/github.com/pierrec/xxHash/LICENSE -+++ /dev/null -@@ -1,28 +0,0 @@ --Copyright (c) 2014, Pierre Curto --All rights reserved. -- --Redistribution and use in source and binary forms, with or without --modification, are permitted provided that the following conditions are met: -- --* Redistributions of source code must retain the above copyright notice, this -- list of conditions and the following disclaimer. -- --* Redistributions in binary form must reproduce the above copyright notice, -- this list of conditions and the following disclaimer in the documentation -- and/or other materials provided with the distribution. -- --* Neither the name of xxHash nor the names of its -- contributors may be used to endorse or promote products derived from -- this software without specific prior written permission. -- --THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS"" --AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE --IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE --FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL --DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER --CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, --OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE --OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
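The hunks above delete the vendored LZ4 frame writer (and the xxHash license that accompanied it). As a point of reference for what is being removed, here is a minimal, hedged sketch of how that writer's visible API (NewWriter, the embedded Header with BlockMaxSize, Write/Flush/Close) was typically driven. The import path `github.com/pierrec/lz4` and the helper name `compressTo` are illustrative assumptions only; nothing in this diff depends on them.

```go
package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4" // assumed v1 import path of the vendored package removed above
)

// compressTo streams src into dst as a single LZ4 frame using the removed writer:
// NewWriter defers the frame header until the first Write (or Close), Write
// compresses the input block by block, and Close appends the end mark plus the
// optional frame checksum.
func compressTo(dst io.Writer, src io.Reader) error {
	zw := lz4.NewWriter(dst)
	zw.Header.BlockMaxSize = 4 << 20 // same value the removed NewWriter uses as its default
	if _, err := io.Copy(zw, src); err != nil {
		return err
	}
	return zw.Close()
}

func main() {
	if err := compressTo(os.Stdout, os.Stdin); err != nil {
		os.Exit(1)
	}
}
```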
-- -diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go -deleted file mode 100644 -index 411504e4bb875..0000000000000 ---- a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go -+++ /dev/null -@@ -1,205 +0,0 @@ --// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version). --// (https://github.com/Cyan4973/xxHash/) --package xxHash32 -- --import ""hash"" -- --const ( -- prime32_1 = 2654435761 -- prime32_2 = 2246822519 -- prime32_3 = 3266489917 -- prime32_4 = 668265263 -- prime32_5 = 374761393 --) -- --type xxHash struct { -- seed uint32 -- v1 uint32 -- v2 uint32 -- v3 uint32 -- v4 uint32 -- totalLen uint64 -- buf [16]byte -- bufused int --} -- --// New returns a new Hash32 instance. --func New(seed uint32) hash.Hash32 { -- xxh := &xxHash{seed: seed} -- xxh.Reset() -- return xxh --} -- --// Sum appends the current hash to b and returns the resulting slice. --// It does not change the underlying hash state. --func (xxh xxHash) Sum(b []byte) []byte { -- h32 := xxh.Sum32() -- return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) --} -- --// Reset resets the Hash to its initial state. --func (xxh *xxHash) Reset() { -- xxh.v1 = xxh.seed + prime32_1 + prime32_2 -- xxh.v2 = xxh.seed + prime32_2 -- xxh.v3 = xxh.seed -- xxh.v4 = xxh.seed - prime32_1 -- xxh.totalLen = 0 -- xxh.bufused = 0 --} -- --// Size returns the number of bytes returned by Sum(). --func (xxh *xxHash) Size() int { -- return 4 --} -- --// BlockSize gives the minimum number of bytes accepted by Write(). --func (xxh *xxHash) BlockSize() int { -- return 1 --} -- --// Write adds input bytes to the Hash. --// It never returns an error. --func (xxh *xxHash) Write(input []byte) (int, error) { -- n := len(input) -- m := xxh.bufused -- -- xxh.totalLen += uint64(n) -- -- r := len(xxh.buf) - m -- if n < r { -- copy(xxh.buf[m:], input) -- xxh.bufused += len(input) -- return n, nil -- } -- -- p := 0 -- if m > 0 { -- // some data left from previous update -- copy(xxh.buf[xxh.bufused:], input[:r]) -- xxh.bufused += len(input) - r -- -- // fast rotl(13) -- p32 := xxh.v1 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 -- xxh.v1 = (p32<<13 | p32>>19) * prime32_1 -- p += 4 -- p32 = xxh.v2 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 -- xxh.v2 = (p32<<13 | p32>>19) * prime32_1 -- p += 4 -- p32 = xxh.v3 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 -- xxh.v3 = (p32<<13 | p32>>19) * prime32_1 -- p += 4 -- p32 = xxh.v4 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 -- xxh.v4 = (p32<<13 | p32>>19) * prime32_1 -- -- p = r -- xxh.bufused = 0 -- } -- -- for n := n - 16; p <= n; { -- p32 := xxh.v1 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 -- xxh.v1 = (p32<<13 | p32>>19) * prime32_1 -- p += 4 -- p32 = xxh.v2 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 -- xxh.v2 = (p32<<13 | p32>>19) * prime32_1 -- p += 4 -- p32 = xxh.v3 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 -- xxh.v3 = (p32<<13 | p32>>19) * prime32_1 -- p += 4 -- p32 = xxh.v4 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 -- xxh.v4 = 
(p32<<13 | p32>>19) * prime32_1 -- p += 4 -- } -- -- copy(xxh.buf[xxh.bufused:], input[p:]) -- xxh.bufused += len(input) - p -- -- return n, nil --} -- --// Sum32 returns the 32 bits Hash value. --func (xxh *xxHash) Sum32() uint32 { -- h32 := uint32(xxh.totalLen) -- if xxh.totalLen >= 16 { -- h32 += ((xxh.v1 << 1) | (xxh.v1 >> 31)) + -- ((xxh.v2 << 7) | (xxh.v2 >> 25)) + -- ((xxh.v3 << 12) | (xxh.v3 >> 20)) + -- ((xxh.v4 << 18) | (xxh.v4 >> 14)) -- } else { -- h32 += xxh.seed + prime32_5 -- } -- -- p := 0 -- n := xxh.bufused -- for n := n - 4; p <= n; p += 4 { -- h32 += (uint32(xxh.buf[p+3])<<24 | uint32(xxh.buf[p+2])<<16 | uint32(xxh.buf[p+1])<<8 | uint32(xxh.buf[p])) * prime32_3 -- h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 -- } -- for ; p < n; p++ { -- h32 += uint32(xxh.buf[p]) * prime32_5 -- h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 -- } -- -- h32 ^= h32 >> 15 -- h32 *= prime32_2 -- h32 ^= h32 >> 13 -- h32 *= prime32_3 -- h32 ^= h32 >> 16 -- -- return h32 --} -- --// Checksum returns the 32bits Hash value. --func Checksum(input []byte, seed uint32) uint32 { -- n := len(input) -- h32 := uint32(n) -- -- if n < 16 { -- h32 += seed + prime32_5 -- } else { -- v1 := seed + prime32_1 + prime32_2 -- v2 := seed + prime32_2 -- v3 := seed -- v4 := seed - prime32_1 -- p := 0 -- for p <= n-16 { -- v1 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 -- v1 = (v1<<13 | v1>>19) * prime32_1 -- p += 4 -- v2 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 -- v2 = (v2<<13 | v2>>19) * prime32_1 -- p += 4 -- v3 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 -- v3 = (v3<<13 | v3>>19) * prime32_1 -- p += 4 -- v4 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 -- v4 = (v4<<13 | v4>>19) * prime32_1 -- p += 4 -- } -- input = input[p:] -- n -= p -- h32 += ((v1 << 1) | (v1 >> 31)) + -- ((v2 << 7) | (v2 >> 25)) + -- ((v3 << 12) | (v3 >> 20)) + -- ((v4 << 18) | (v4 >> 14)) -- } -- -- p := 0 -- for p <= n-4 { -- h32 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_3 -- h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 -- p += 4 -- } -- for p < n { -- h32 += uint32(input[p]) * prime32_5 -- h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 -- p++ -- } -- -- h32 ^= h32 >> 15 -- h32 *= prime32_2 -- h32 ^= h32 >> 13 -- h32 *= prime32_3 -- h32 ^= h32 >> 16 -- -- return h32 --} -diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore -deleted file mode 100644 -index 83c8f82374a29..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/.gitignore -+++ /dev/null -@@ -1,9 +0,0 @@ --*.[68] --*.a --*.out --*.swp --_obj --_testmain.go --cmd/metrics-bench/metrics-bench --cmd/metrics-example/metrics-example --cmd/never-read/never-read -diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml -deleted file mode 100644 -index 117763e6509e7..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/.travis.yml -+++ /dev/null -@@ -1,17 +0,0 @@ --language: go -- --go: -- - 1.3 -- - 1.4 -- - 1.5 -- - 1.6 -- - 1.7 -- - 1.8 -- - 1.9 -- --script: -- - ./validate.sh -- --# this should give us faster builds according to --# http://docs.travis-ci.com/user/migrating-from-legacy/ --sudo: false -diff --git 
a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE -deleted file mode 100644 -index 363fa9ee77b83..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/LICENSE -+++ /dev/null -@@ -1,29 +0,0 @@ --Copyright 2012 Richard Crowley. All rights reserved. -- --Redistribution and use in source and binary forms, with or without --modification, are permitted provided that the following conditions are --met: -- -- 1. Redistributions of source code must retain the above copyright -- notice, this list of conditions and the following disclaimer. -- -- 2. Redistributions in binary form must reproduce the above -- copyright notice, this list of conditions and the following -- disclaimer in the documentation and/or other materials provided -- with the distribution. -- --THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS --OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED --WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE --FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR --CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF --SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS --INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN --CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) --ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF --THE POSSIBILITY OF SUCH DAMAGE. -- --The views and conclusions contained in the software and documentation --are those of the authors and should not be interpreted as representing --official policies, either expressed or implied, of Richard Crowley. -diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md -deleted file mode 100644 -index 17cea76de643a..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/README.md -+++ /dev/null -@@ -1,167 +0,0 @@ --go-metrics --========== -- --![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master) -- --Go port of Coda Hale's Metrics library: . -- --Documentation: . -- --Usage ------- -- --Create and update metrics: -- --```go --c := metrics.NewCounter() --metrics.Register(""foo"", c) --c.Inc(47) -- --g := metrics.NewGauge() --metrics.Register(""bar"", g) --g.Update(47) -- --r := NewRegistry() --g := metrics.NewRegisteredFunctionalGauge(""cache-evictions"", r, func() int64 { return cache.getEvictionsCount() }) -- --s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) --h := metrics.NewHistogram(s) --metrics.Register(""baz"", h) --h.Update(47) -- --m := metrics.NewMeter() --metrics.Register(""quux"", m) --m.Mark(47) -- --t := metrics.NewTimer() --metrics.Register(""bang"", t) --t.Time(func() {}) --t.Update(47) --``` -- --Register() is not threadsafe. 
For threadsafe metric registration use --GetOrRegister: -- --```go --t := metrics.GetOrRegisterTimer(""account.create.latency"", nil) --t.Time(func() {}) --t.Update(47) --``` -- --**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will --leak memory: -- --```go --// Will call Stop() on the Meter to allow for garbage collection --metrics.Unregister(""quux"") --// Or similarly for a Timer that embeds a Meter --metrics.Unregister(""bang"") --``` -- --Periodically log every metric in human-readable form to standard error: -- --```go --go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, ""metrics: "", log.Lmicroseconds)) --``` -- --Periodically log every metric in slightly-more-parseable form to syslog: -- --```go --w, _ := syslog.Dial(""unixgram"", ""/dev/log"", syslog.LOG_INFO, ""metrics"") --go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) --``` -- --Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite): -- --```go -- --import ""github.com/cyberdelia/go-metrics-graphite"" -- --addr, _ := net.ResolveTCPAddr(""tcp"", ""127.0.0.1:2003"") --go graphite.Graphite(metrics.DefaultRegistry, 10e9, ""metrics"", addr) --``` -- --Periodically emit every metric into InfluxDB: -- --**NOTE:** this has been pulled out of the library due to constant fluctuations --in the InfluxDB API. In fact, all client libraries are on their way out. see --issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and --[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details. -- --```go --import ""github.com/vrischmann/go-metrics-influxdb"" -- --go influxdb.InfluxDB(metrics.DefaultRegistry, -- 10e9, -- ""127.0.0.1:8086"", -- ""database-name"", -- ""username"", -- ""password"" --) --``` -- --Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): -- --**Note**: the client included with this repository under the `librato` package --has been deprecated and moved to the repository linked above. -- --```go --import ""github.com/mihasya/go-metrics-librato"" -- --go librato.Librato(metrics.DefaultRegistry, -- 10e9, // interval -- ""example@example.com"", // account owner email address -- ""token"", // Librato API token -- ""hostname"", // source -- []float64{0.95}, // percentiles to send -- time.Millisecond, // time unit --) --``` -- --Periodically emit every metric to StatHat: -- --```go --import ""github.com/rcrowley/go-metrics/stathat"" -- --go stathat.Stathat(metrics.DefaultRegistry, 10e9, ""example@example.com"") --``` -- --Maintain all metrics along with expvars at `/debug/metrics`: -- --This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) --but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars --as well as all your go-metrics. 
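Taken together, the README fragments above describe the whole lifecycle: create or look up a metric through the thread-safe GetOrRegister helpers, record values on it, report the registry periodically, and unregister short-lived meters and timers. Below is one compact, hedged sketch that strings those quoted calls together; the metric names, the simulated work, and the final sleep are illustrative assumptions, and the README's own `exp.Exp` example continues right after this aside.

```go
package main

import (
	"log"
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Thread-safe registration into the default registry, as recommended above.
	requests := metrics.GetOrRegisterCounter("http.requests", nil)
	latency := metrics.GetOrRegisterTimer("http.latency", nil)

	// Log every registered metric to stderr every 5 seconds, in the background.
	go metrics.Log(metrics.DefaultRegistry, 5*time.Second,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	for i := 0; i < 50; i++ {
		requests.Inc(1)
		latency.Time(func() { time.Sleep(20 * time.Millisecond) }) // simulated work
	}

	time.Sleep(6 * time.Second) // give the background logger a chance to report once

	// Timers embed a meter, so unregister them when done to stop the ticker goroutine.
	metrics.Unregister("http.latency")
}
```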
-- -- --```go --import ""github.com/rcrowley/go-metrics/exp"" -- --exp.Exp(metrics.DefaultRegistry) --``` -- --Installation -------------- -- --```sh --go get github.com/rcrowley/go-metrics --``` -- --StatHat support additionally requires their Go client: -- --```sh --go get github.com/stathat/go --``` -- --Publishing Metrics -------------------- -- --Clients are available for the following destinations: -- --* Librato - https://github.com/mihasya/go-metrics-librato --* Graphite - https://github.com/cyberdelia/go-metrics-graphite --* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb --* Ganglia - https://github.com/appscode/metlia --* Prometheus - https://github.com/deathowl/go-metrics-prometheus --* DataDog - https://github.com/syntaqx/go-metrics-datadog --* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx --* Honeycomb - https://github.com/getspine/go-metrics-honeycomb -diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go -deleted file mode 100644 -index bb7b039cb5725..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/counter.go -+++ /dev/null -@@ -1,112 +0,0 @@ --package metrics -- --import ""sync/atomic"" -- --// Counters hold an int64 value that can be incremented and decremented. --type Counter interface { -- Clear() -- Count() int64 -- Dec(int64) -- Inc(int64) -- Snapshot() Counter --} -- --// GetOrRegisterCounter returns an existing Counter or constructs and registers --// a new StandardCounter. --func GetOrRegisterCounter(name string, r Registry) Counter { -- if nil == r { -- r = DefaultRegistry -- } -- return r.GetOrRegister(name, NewCounter).(Counter) --} -- --// NewCounter constructs a new StandardCounter. --func NewCounter() Counter { -- if UseNilMetrics { -- return NilCounter{} -- } -- return &StandardCounter{0} --} -- --// NewRegisteredCounter constructs and registers a new StandardCounter. --func NewRegisteredCounter(name string, r Registry) Counter { -- c := NewCounter() -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// CounterSnapshot is a read-only copy of another Counter. --type CounterSnapshot int64 -- --// Clear panics. --func (CounterSnapshot) Clear() { -- panic(""Clear called on a CounterSnapshot"") --} -- --// Count returns the count at the time the snapshot was taken. --func (c CounterSnapshot) Count() int64 { return int64(c) } -- --// Dec panics. --func (CounterSnapshot) Dec(int64) { -- panic(""Dec called on a CounterSnapshot"") --} -- --// Inc panics. --func (CounterSnapshot) Inc(int64) { -- panic(""Inc called on a CounterSnapshot"") --} -- --// Snapshot returns the snapshot. --func (c CounterSnapshot) Snapshot() Counter { return c } -- --// NilCounter is a no-op Counter. --type NilCounter struct{} -- --// Clear is a no-op. --func (NilCounter) Clear() {} -- --// Count is a no-op. --func (NilCounter) Count() int64 { return 0 } -- --// Dec is a no-op. --func (NilCounter) Dec(i int64) {} -- --// Inc is a no-op. --func (NilCounter) Inc(i int64) {} -- --// Snapshot is a no-op. --func (NilCounter) Snapshot() Counter { return NilCounter{} } -- --// StandardCounter is the standard implementation of a Counter and uses the --// sync/atomic package to manage a single int64 value. --type StandardCounter struct { -- count int64 --} -- --// Clear sets the counter to zero. --func (c *StandardCounter) Clear() { -- atomic.StoreInt64(&c.count, 0) --} -- --// Count returns the current count. 
--func (c *StandardCounter) Count() int64 { -- return atomic.LoadInt64(&c.count) --} -- --// Dec decrements the counter by the given amount. --func (c *StandardCounter) Dec(i int64) { -- atomic.AddInt64(&c.count, -i) --} -- --// Inc increments the counter by the given amount. --func (c *StandardCounter) Inc(i int64) { -- atomic.AddInt64(&c.count, i) --} -- --// Snapshot returns a read-only copy of the counter. --func (c *StandardCounter) Snapshot() Counter { -- return CounterSnapshot(c.Count()) --} -diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go -deleted file mode 100644 -index 043ccefab612d..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/debug.go -+++ /dev/null -@@ -1,76 +0,0 @@ --package metrics -- --import ( -- ""runtime/debug"" -- ""time"" --) -- --var ( -- debugMetrics struct { -- GCStats struct { -- LastGC Gauge -- NumGC Gauge -- Pause Histogram -- //PauseQuantiles Histogram -- PauseTotal Gauge -- } -- ReadGCStats Timer -- } -- gcStats debug.GCStats --) -- --// Capture new values for the Go garbage collector statistics exported in --// debug.GCStats. This is designed to be called as a goroutine. --func CaptureDebugGCStats(r Registry, d time.Duration) { -- for _ = range time.Tick(d) { -- CaptureDebugGCStatsOnce(r) -- } --} -- --// Capture new values for the Go garbage collector statistics exported in --// debug.GCStats. This is designed to be called in a background goroutine. --// Giving a registry which has not been given to RegisterDebugGCStats will --// panic. --// --// Be careful (but much less so) with this because debug.ReadGCStats calls --// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world --// operation, isn't something you want to be doing all the time. --func CaptureDebugGCStatsOnce(r Registry) { -- lastGC := gcStats.LastGC -- t := time.Now() -- debug.ReadGCStats(&gcStats) -- debugMetrics.ReadGCStats.UpdateSince(t) -- -- debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) -- debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) -- if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { -- debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) -- } -- //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) -- debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) --} -- --// Register metrics for the Go garbage collector statistics exported in --// debug.GCStats. The metrics are named by their fully-qualified Go symbols, --// i.e. debug.GCStats.PauseTotal. --func RegisterDebugGCStats(r Registry) { -- debugMetrics.GCStats.LastGC = NewGauge() -- debugMetrics.GCStats.NumGC = NewGauge() -- debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) -- //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) -- debugMetrics.GCStats.PauseTotal = NewGauge() -- debugMetrics.ReadGCStats = NewTimer() -- -- r.Register(""debug.GCStats.LastGC"", debugMetrics.GCStats.LastGC) -- r.Register(""debug.GCStats.NumGC"", debugMetrics.GCStats.NumGC) -- r.Register(""debug.GCStats.Pause"", debugMetrics.GCStats.Pause) -- //r.Register(""debug.GCStats.PauseQuantiles"", debugMetrics.GCStats.PauseQuantiles) -- r.Register(""debug.GCStats.PauseTotal"", debugMetrics.GCStats.PauseTotal) -- r.Register(""debug.ReadGCStats"", debugMetrics.ReadGCStats) --} -- --// Allocate an initial slice for gcStats.Pause to avoid allocations during --// normal operation. 
--func init() { -- gcStats.Pause = make([]time.Duration, 11) --} -diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go -deleted file mode 100644 -index a8183dd7e21f8..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/ewma.go -+++ /dev/null -@@ -1,138 +0,0 @@ --package metrics -- --import ( -- ""math"" -- ""sync"" -- ""sync/atomic"" --) -- --// EWMAs continuously calculate an exponentially-weighted moving average --// based on an outside source of clock ticks. --type EWMA interface { -- Rate() float64 -- Snapshot() EWMA -- Tick() -- Update(int64) --} -- --// NewEWMA constructs a new EWMA with the given alpha. --func NewEWMA(alpha float64) EWMA { -- if UseNilMetrics { -- return NilEWMA{} -- } -- return &StandardEWMA{alpha: alpha} --} -- --// NewEWMA1 constructs a new EWMA for a one-minute moving average. --func NewEWMA1() EWMA { -- return NewEWMA(1 - math.Exp(-5.0/60.0/1)) --} -- --// NewEWMA5 constructs a new EWMA for a five-minute moving average. --func NewEWMA5() EWMA { -- return NewEWMA(1 - math.Exp(-5.0/60.0/5)) --} -- --// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. --func NewEWMA15() EWMA { -- return NewEWMA(1 - math.Exp(-5.0/60.0/15)) --} -- --// EWMASnapshot is a read-only copy of another EWMA. --type EWMASnapshot float64 -- --// Rate returns the rate of events per second at the time the snapshot was --// taken. --func (a EWMASnapshot) Rate() float64 { return float64(a) } -- --// Snapshot returns the snapshot. --func (a EWMASnapshot) Snapshot() EWMA { return a } -- --// Tick panics. --func (EWMASnapshot) Tick() { -- panic(""Tick called on an EWMASnapshot"") --} -- --// Update panics. --func (EWMASnapshot) Update(int64) { -- panic(""Update called on an EWMASnapshot"") --} -- --// NilEWMA is a no-op EWMA. --type NilEWMA struct{} -- --// Rate is a no-op. --func (NilEWMA) Rate() float64 { return 0.0 } -- --// Snapshot is a no-op. --func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } -- --// Tick is a no-op. --func (NilEWMA) Tick() {} -- --// Update is a no-op. --func (NilEWMA) Update(n int64) {} -- --// StandardEWMA is the standard implementation of an EWMA and tracks the number --// of uncounted events and processes them on each tick. It uses the --// sync/atomic package to manage uncounted events. --type StandardEWMA struct { -- uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment -- alpha float64 -- rate uint64 -- init uint32 -- mutex sync.Mutex --} -- --// Rate returns the moving average rate of events per second. --func (a *StandardEWMA) Rate() float64 { -- currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9) -- return currentRate --} -- --// Snapshot returns a read-only copy of the EWMA. --func (a *StandardEWMA) Snapshot() EWMA { -- return EWMASnapshot(a.Rate()) --} -- --// Tick ticks the clock to update the moving average. It assumes it is called --// every five seconds. --func (a *StandardEWMA) Tick() { -- // Optimization to avoid mutex locking in the hot-path. -- if atomic.LoadUint32(&a.init) == 1 { -- a.updateRate(a.fetchInstantRate()) -- } else { -- // Slow-path: this is only needed on the first Tick() and preserves transactional updating -- // of init and rate in the else block. The first conditional is needed below because -- // a different thread could have set a.init = 1 between the time of the first atomic load and when -- // the lock was acquired. 
-- a.mutex.Lock() -- if atomic.LoadUint32(&a.init) == 1 { -- // The fetchInstantRate() uses atomic loading, which is unecessary in this critical section -- // but again, this section is only invoked on the first successful Tick() operation. -- a.updateRate(a.fetchInstantRate()) -- } else { -- atomic.StoreUint32(&a.init, 1) -- atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate())) -- } -- a.mutex.Unlock() -- } --} -- --func (a *StandardEWMA) fetchInstantRate() float64 { -- count := atomic.LoadInt64(&a.uncounted) -- atomic.AddInt64(&a.uncounted, -count) -- instantRate := float64(count) / float64(5e9) -- return instantRate --} -- --func (a *StandardEWMA) updateRate(instantRate float64) { -- currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) -- currentRate += a.alpha * (instantRate - currentRate) -- atomic.StoreUint64(&a.rate, math.Float64bits(currentRate)) --} -- --// Update adds n uncounted events. --func (a *StandardEWMA) Update(n int64) { -- atomic.AddInt64(&a.uncounted, n) --} -diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go -deleted file mode 100644 -index cb57a93889fc2..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/gauge.go -+++ /dev/null -@@ -1,120 +0,0 @@ --package metrics -- --import ""sync/atomic"" -- --// Gauges hold an int64 value that can be set arbitrarily. --type Gauge interface { -- Snapshot() Gauge -- Update(int64) -- Value() int64 --} -- --// GetOrRegisterGauge returns an existing Gauge or constructs and registers a --// new StandardGauge. --func GetOrRegisterGauge(name string, r Registry) Gauge { -- if nil == r { -- r = DefaultRegistry -- } -- return r.GetOrRegister(name, NewGauge).(Gauge) --} -- --// NewGauge constructs a new StandardGauge. --func NewGauge() Gauge { -- if UseNilMetrics { -- return NilGauge{} -- } -- return &StandardGauge{0} --} -- --// NewRegisteredGauge constructs and registers a new StandardGauge. --func NewRegisteredGauge(name string, r Registry) Gauge { -- c := NewGauge() -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// NewFunctionalGauge constructs a new FunctionalGauge. --func NewFunctionalGauge(f func() int64) Gauge { -- if UseNilMetrics { -- return NilGauge{} -- } -- return &FunctionalGauge{value: f} --} -- --// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. --func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { -- c := NewFunctionalGauge(f) -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// GaugeSnapshot is a read-only copy of another Gauge. --type GaugeSnapshot int64 -- --// Snapshot returns the snapshot. --func (g GaugeSnapshot) Snapshot() Gauge { return g } -- --// Update panics. --func (GaugeSnapshot) Update(int64) { -- panic(""Update called on a GaugeSnapshot"") --} -- --// Value returns the value at the time the snapshot was taken. --func (g GaugeSnapshot) Value() int64 { return int64(g) } -- --// NilGauge is a no-op Gauge. --type NilGauge struct{} -- --// Snapshot is a no-op. --func (NilGauge) Snapshot() Gauge { return NilGauge{} } -- --// Update is a no-op. --func (NilGauge) Update(v int64) {} -- --// Value is a no-op. --func (NilGauge) Value() int64 { return 0 } -- --// StandardGauge is the standard implementation of a Gauge and uses the --// sync/atomic package to manage a single int64 value. --type StandardGauge struct { -- value int64 --} -- --// Snapshot returns a read-only copy of the gauge. 
--func (g *StandardGauge) Snapshot() Gauge { -- return GaugeSnapshot(g.Value()) --} -- --// Update updates the gauge's value. --func (g *StandardGauge) Update(v int64) { -- atomic.StoreInt64(&g.value, v) --} -- --// Value returns the gauge's current value. --func (g *StandardGauge) Value() int64 { -- return atomic.LoadInt64(&g.value) --} -- --// FunctionalGauge returns value from given function --type FunctionalGauge struct { -- value func() int64 --} -- --// Value returns the gauge's current value. --func (g FunctionalGauge) Value() int64 { -- return g.value() --} -- --// Snapshot returns the snapshot. --func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } -- --// Update panics. --func (FunctionalGauge) Update(int64) { -- panic(""Update called on a FunctionalGauge"") --} -diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go -deleted file mode 100644 -index 3962e6db09a8c..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go -+++ /dev/null -@@ -1,125 +0,0 @@ --package metrics -- --import ( -- ""math"" -- ""sync/atomic"" --) -- --// GaugeFloat64s hold a float64 value that can be set arbitrarily. --type GaugeFloat64 interface { -- Snapshot() GaugeFloat64 -- Update(float64) -- Value() float64 --} -- --// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a --// new StandardGaugeFloat64. --func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { -- if nil == r { -- r = DefaultRegistry -- } -- return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) --} -- --// NewGaugeFloat64 constructs a new StandardGaugeFloat64. --func NewGaugeFloat64() GaugeFloat64 { -- if UseNilMetrics { -- return NilGaugeFloat64{} -- } -- return &StandardGaugeFloat64{ -- value: 0.0, -- } --} -- --// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. --func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { -- c := NewGaugeFloat64() -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// NewFunctionalGauge constructs a new FunctionalGauge. --func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { -- if UseNilMetrics { -- return NilGaugeFloat64{} -- } -- return &FunctionalGaugeFloat64{value: f} --} -- --// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. --func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { -- c := NewFunctionalGaugeFloat64(f) -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. --type GaugeFloat64Snapshot float64 -- --// Snapshot returns the snapshot. --func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } -- --// Update panics. --func (GaugeFloat64Snapshot) Update(float64) { -- panic(""Update called on a GaugeFloat64Snapshot"") --} -- --// Value returns the value at the time the snapshot was taken. --func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } -- --// NilGauge is a no-op Gauge. --type NilGaugeFloat64 struct{} -- --// Snapshot is a no-op. --func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } -- --// Update is a no-op. --func (NilGaugeFloat64) Update(v float64) {} -- --// Value is a no-op. 
--func (NilGaugeFloat64) Value() float64 { return 0.0 } -- --// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses --// sync.Mutex to manage a single float64 value. --type StandardGaugeFloat64 struct { -- value uint64 --} -- --// Snapshot returns a read-only copy of the gauge. --func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { -- return GaugeFloat64Snapshot(g.Value()) --} -- --// Update updates the gauge's value. --func (g *StandardGaugeFloat64) Update(v float64) { -- atomic.StoreUint64(&g.value, math.Float64bits(v)) --} -- --// Value returns the gauge's current value. --func (g *StandardGaugeFloat64) Value() float64 { -- return math.Float64frombits(atomic.LoadUint64(&g.value)) --} -- --// FunctionalGaugeFloat64 returns value from given function --type FunctionalGaugeFloat64 struct { -- value func() float64 --} -- --// Value returns the gauge's current value. --func (g FunctionalGaugeFloat64) Value() float64 { -- return g.value() --} -- --// Snapshot returns the snapshot. --func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } -- --// Update panics. --func (FunctionalGaugeFloat64) Update(float64) { -- panic(""Update called on a FunctionalGaugeFloat64"") --} -diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go -deleted file mode 100644 -index abd0a7d2918bc..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/graphite.go -+++ /dev/null -@@ -1,113 +0,0 @@ --package metrics -- --import ( -- ""bufio"" -- ""fmt"" -- ""log"" -- ""net"" -- ""strconv"" -- ""strings"" -- ""time"" --) -- --// GraphiteConfig provides a container with configuration parameters for --// the Graphite exporter --type GraphiteConfig struct { -- Addr *net.TCPAddr // Network address to connect to -- Registry Registry // Registry to be exported -- FlushInterval time.Duration // Flush interval -- DurationUnit time.Duration // Time conversion unit for durations -- Prefix string // Prefix to be prepended to metric names -- Percentiles []float64 // Percentiles to export from timers and histograms --} -- --// Graphite is a blocking exporter function which reports metrics in r --// to a graphite server located at addr, flushing them every d duration --// and prepending metric names with prefix. --func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { -- GraphiteWithConfig(GraphiteConfig{ -- Addr: addr, -- Registry: r, -- FlushInterval: d, -- DurationUnit: time.Nanosecond, -- Prefix: prefix, -- Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, -- }) --} -- --// GraphiteWithConfig is a blocking exporter function just like Graphite, --// but it takes a GraphiteConfig instead. --func GraphiteWithConfig(c GraphiteConfig) { -- log.Printf(""WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015"") -- for _ = range time.Tick(c.FlushInterval) { -- if err := graphite(&c); nil != err { -- log.Println(err) -- } -- } --} -- --// GraphiteOnce performs a single submission to Graphite, returning a --// non-nil error on failed connections. This can be used in a loop --// similar to GraphiteWithConfig for custom error handling. --func GraphiteOnce(c GraphiteConfig) error { -- log.Printf(""WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015"") -- return graphite(&c) --} -- --func graphite(c *GraphiteConfig) error { -- now := time.Now().Unix() -- du := float64(c.DurationUnit) -- conn, err := net.DialTCP(""tcp"", nil, c.Addr) -- if nil != err { -- return err -- } -- defer conn.Close() -- w := bufio.NewWriter(conn) -- c.Registry.Each(func(name string, i interface{}) { -- switch metric := i.(type) { -- case Counter: -- fmt.Fprintf(w, ""%s.%s.count %d %d\n"", c.Prefix, name, metric.Count(), now) -- case Gauge: -- fmt.Fprintf(w, ""%s.%s.value %d %d\n"", c.Prefix, name, metric.Value(), now) -- case GaugeFloat64: -- fmt.Fprintf(w, ""%s.%s.value %f %d\n"", c.Prefix, name, metric.Value(), now) -- case Histogram: -- h := metric.Snapshot() -- ps := h.Percentiles(c.Percentiles) -- fmt.Fprintf(w, ""%s.%s.count %d %d\n"", c.Prefix, name, h.Count(), now) -- fmt.Fprintf(w, ""%s.%s.min %d %d\n"", c.Prefix, name, h.Min(), now) -- fmt.Fprintf(w, ""%s.%s.max %d %d\n"", c.Prefix, name, h.Max(), now) -- fmt.Fprintf(w, ""%s.%s.mean %.2f %d\n"", c.Prefix, name, h.Mean(), now) -- fmt.Fprintf(w, ""%s.%s.std-dev %.2f %d\n"", c.Prefix, name, h.StdDev(), now) -- for psIdx, psKey := range c.Percentiles { -- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ""."", """", 1) -- fmt.Fprintf(w, ""%s.%s.%s-percentile %.2f %d\n"", c.Prefix, name, key, ps[psIdx], now) -- } -- case Meter: -- m := metric.Snapshot() -- fmt.Fprintf(w, ""%s.%s.count %d %d\n"", c.Prefix, name, m.Count(), now) -- fmt.Fprintf(w, ""%s.%s.one-minute %.2f %d\n"", c.Prefix, name, m.Rate1(), now) -- fmt.Fprintf(w, ""%s.%s.five-minute %.2f %d\n"", c.Prefix, name, m.Rate5(), now) -- fmt.Fprintf(w, ""%s.%s.fifteen-minute %.2f %d\n"", c.Prefix, name, m.Rate15(), now) -- fmt.Fprintf(w, ""%s.%s.mean %.2f %d\n"", c.Prefix, name, m.RateMean(), now) -- case Timer: -- t := metric.Snapshot() -- ps := t.Percentiles(c.Percentiles) -- fmt.Fprintf(w, ""%s.%s.count %d %d\n"", c.Prefix, name, t.Count(), now) -- fmt.Fprintf(w, ""%s.%s.min %d %d\n"", c.Prefix, name, t.Min()/int64(du), now) -- fmt.Fprintf(w, ""%s.%s.max %d %d\n"", c.Prefix, name, t.Max()/int64(du), now) -- fmt.Fprintf(w, ""%s.%s.mean %.2f %d\n"", c.Prefix, name, t.Mean()/du, now) -- fmt.Fprintf(w, ""%s.%s.std-dev %.2f %d\n"", c.Prefix, name, t.StdDev()/du, now) -- for psIdx, psKey := range c.Percentiles { -- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ""."", """", 1) -- fmt.Fprintf(w, ""%s.%s.%s-percentile %.2f %d\n"", c.Prefix, name, key, ps[psIdx], now) -- } -- fmt.Fprintf(w, ""%s.%s.one-minute %.2f %d\n"", c.Prefix, name, t.Rate1(), now) -- fmt.Fprintf(w, ""%s.%s.five-minute %.2f %d\n"", c.Prefix, name, t.Rate5(), now) -- fmt.Fprintf(w, ""%s.%s.fifteen-minute %.2f %d\n"", c.Prefix, name, t.Rate15(), now) -- fmt.Fprintf(w, ""%s.%s.mean-rate %.2f %d\n"", c.Prefix, name, t.RateMean(), now) -- } -- w.Flush() -- }) -- return nil --} -diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go -deleted file mode 100644 -index 445131caee596..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/healthcheck.go -+++ /dev/null -@@ -1,61 +0,0 @@ --package metrics -- --// Healthchecks hold an error value describing an arbitrary up/down status. 
--type Healthcheck interface { -- Check() -- Error() error -- Healthy() -- Unhealthy(error) --} -- --// NewHealthcheck constructs a new Healthcheck which will use the given --// function to update its status. --func NewHealthcheck(f func(Healthcheck)) Healthcheck { -- if UseNilMetrics { -- return NilHealthcheck{} -- } -- return &StandardHealthcheck{nil, f} --} -- --// NilHealthcheck is a no-op. --type NilHealthcheck struct{} -- --// Check is a no-op. --func (NilHealthcheck) Check() {} -- --// Error is a no-op. --func (NilHealthcheck) Error() error { return nil } -- --// Healthy is a no-op. --func (NilHealthcheck) Healthy() {} -- --// Unhealthy is a no-op. --func (NilHealthcheck) Unhealthy(error) {} -- --// StandardHealthcheck is the standard implementation of a Healthcheck and --// stores the status and a function to call to update the status. --type StandardHealthcheck struct { -- err error -- f func(Healthcheck) --} -- --// Check runs the healthcheck function to update the healthcheck's status. --func (h *StandardHealthcheck) Check() { -- h.f(h) --} -- --// Error returns the healthcheck's status, which will be nil if it is healthy. --func (h *StandardHealthcheck) Error() error { -- return h.err --} -- --// Healthy marks the healthcheck as healthy. --func (h *StandardHealthcheck) Healthy() { -- h.err = nil --} -- --// Unhealthy marks the healthcheck as unhealthy. The error is stored and --// may be retrieved by the Error method. --func (h *StandardHealthcheck) Unhealthy(err error) { -- h.err = err --} -diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go -deleted file mode 100644 -index dbc837fe4d954..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/histogram.go -+++ /dev/null -@@ -1,202 +0,0 @@ --package metrics -- --// Histograms calculate distribution statistics from a series of int64 values. --type Histogram interface { -- Clear() -- Count() int64 -- Max() int64 -- Mean() float64 -- Min() int64 -- Percentile(float64) float64 -- Percentiles([]float64) []float64 -- Sample() Sample -- Snapshot() Histogram -- StdDev() float64 -- Sum() int64 -- Update(int64) -- Variance() float64 --} -- --// GetOrRegisterHistogram returns an existing Histogram or constructs and --// registers a new StandardHistogram. --func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { -- if nil == r { -- r = DefaultRegistry -- } -- return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) --} -- --// NewHistogram constructs a new StandardHistogram from a Sample. --func NewHistogram(s Sample) Histogram { -- if UseNilMetrics { -- return NilHistogram{} -- } -- return &StandardHistogram{sample: s} --} -- --// NewRegisteredHistogram constructs and registers a new StandardHistogram from --// a Sample. --func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { -- c := NewHistogram(s) -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// HistogramSnapshot is a read-only copy of another Histogram. --type HistogramSnapshot struct { -- sample *SampleSnapshot --} -- --// Clear panics. --func (*HistogramSnapshot) Clear() { -- panic(""Clear called on a HistogramSnapshot"") --} -- --// Count returns the number of samples recorded at the time the snapshot was --// taken. --func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } -- --// Max returns the maximum value in the sample at the time the snapshot was --// taken. 
--func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } -- --// Mean returns the mean of the values in the sample at the time the snapshot --// was taken. --func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } -- --// Min returns the minimum value in the sample at the time the snapshot was --// taken. --func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } -- --// Percentile returns an arbitrary percentile of values in the sample at the --// time the snapshot was taken. --func (h *HistogramSnapshot) Percentile(p float64) float64 { -- return h.sample.Percentile(p) --} -- --// Percentiles returns a slice of arbitrary percentiles of values in the sample --// at the time the snapshot was taken. --func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { -- return h.sample.Percentiles(ps) --} -- --// Sample returns the Sample underlying the histogram. --func (h *HistogramSnapshot) Sample() Sample { return h.sample } -- --// Snapshot returns the snapshot. --func (h *HistogramSnapshot) Snapshot() Histogram { return h } -- --// StdDev returns the standard deviation of the values in the sample at the --// time the snapshot was taken. --func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } -- --// Sum returns the sum in the sample at the time the snapshot was taken. --func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } -- --// Update panics. --func (*HistogramSnapshot) Update(int64) { -- panic(""Update called on a HistogramSnapshot"") --} -- --// Variance returns the variance of inputs at the time the snapshot was taken. --func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } -- --// NilHistogram is a no-op Histogram. --type NilHistogram struct{} -- --// Clear is a no-op. --func (NilHistogram) Clear() {} -- --// Count is a no-op. --func (NilHistogram) Count() int64 { return 0 } -- --// Max is a no-op. --func (NilHistogram) Max() int64 { return 0 } -- --// Mean is a no-op. --func (NilHistogram) Mean() float64 { return 0.0 } -- --// Min is a no-op. --func (NilHistogram) Min() int64 { return 0 } -- --// Percentile is a no-op. --func (NilHistogram) Percentile(p float64) float64 { return 0.0 } -- --// Percentiles is a no-op. --func (NilHistogram) Percentiles(ps []float64) []float64 { -- return make([]float64, len(ps)) --} -- --// Sample is a no-op. --func (NilHistogram) Sample() Sample { return NilSample{} } -- --// Snapshot is a no-op. --func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } -- --// StdDev is a no-op. --func (NilHistogram) StdDev() float64 { return 0.0 } -- --// Sum is a no-op. --func (NilHistogram) Sum() int64 { return 0 } -- --// Update is a no-op. --func (NilHistogram) Update(v int64) {} -- --// Variance is a no-op. --func (NilHistogram) Variance() float64 { return 0.0 } -- --// StandardHistogram is the standard implementation of a Histogram and uses a --// Sample to bound its memory use. --type StandardHistogram struct { -- sample Sample --} -- --// Clear clears the histogram and its sample. --func (h *StandardHistogram) Clear() { h.sample.Clear() } -- --// Count returns the number of samples recorded since the histogram was last --// cleared. --func (h *StandardHistogram) Count() int64 { return h.sample.Count() } -- --// Max returns the maximum value in the sample. --func (h *StandardHistogram) Max() int64 { return h.sample.Max() } -- --// Mean returns the mean of the values in the sample. 
--func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } -- --// Min returns the minimum value in the sample. --func (h *StandardHistogram) Min() int64 { return h.sample.Min() } -- --// Percentile returns an arbitrary percentile of the values in the sample. --func (h *StandardHistogram) Percentile(p float64) float64 { -- return h.sample.Percentile(p) --} -- --// Percentiles returns a slice of arbitrary percentiles of the values in the --// sample. --func (h *StandardHistogram) Percentiles(ps []float64) []float64 { -- return h.sample.Percentiles(ps) --} -- --// Sample returns the Sample underlying the histogram. --func (h *StandardHistogram) Sample() Sample { return h.sample } -- --// Snapshot returns a read-only copy of the histogram. --func (h *StandardHistogram) Snapshot() Histogram { -- return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} --} -- --// StdDev returns the standard deviation of the values in the sample. --func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } -- --// Sum returns the sum in the sample. --func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } -- --// Update samples a new value. --func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } -- --// Variance returns the variance of the values in the sample. --func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } -diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go -deleted file mode 100644 -index 174b9477e92d6..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/json.go -+++ /dev/null -@@ -1,31 +0,0 @@ --package metrics -- --import ( -- ""encoding/json"" -- ""io"" -- ""time"" --) -- --// MarshalJSON returns a byte slice containing a JSON representation of all --// the metrics in the Registry. --func (r *StandardRegistry) MarshalJSON() ([]byte, error) { -- return json.Marshal(r.GetAll()) --} -- --// WriteJSON writes metrics from the given registry periodically to the --// specified io.Writer as JSON. --func WriteJSON(r Registry, d time.Duration, w io.Writer) { -- for _ = range time.Tick(d) { -- WriteJSONOnce(r, w) -- } --} -- --// WriteJSONOnce writes metrics from the given registry to the specified --// io.Writer as JSON. --func WriteJSONOnce(r Registry, w io.Writer) { -- json.NewEncoder(w).Encode(r) --} -- --func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { -- return json.Marshal(p.GetAll()) --} -diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go -deleted file mode 100644 -index f8074c0457682..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/log.go -+++ /dev/null -@@ -1,80 +0,0 @@ --package metrics -- --import ( -- ""time"" --) -- --type Logger interface { -- Printf(format string, v ...interface{}) --} -- --func Log(r Registry, freq time.Duration, l Logger) { -- LogScaled(r, freq, time.Nanosecond, l) --} -- --// Output each metric in the given registry periodically using the given --// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
--func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { -- du := float64(scale) -- duSuffix := scale.String()[1:] -- -- for _ = range time.Tick(freq) { -- r.Each(func(name string, i interface{}) { -- switch metric := i.(type) { -- case Counter: -- l.Printf(""counter %s\n"", name) -- l.Printf("" count: %9d\n"", metric.Count()) -- case Gauge: -- l.Printf(""gauge %s\n"", name) -- l.Printf("" value: %9d\n"", metric.Value()) -- case GaugeFloat64: -- l.Printf(""gauge %s\n"", name) -- l.Printf("" value: %f\n"", metric.Value()) -- case Healthcheck: -- metric.Check() -- l.Printf(""healthcheck %s\n"", name) -- l.Printf("" error: %v\n"", metric.Error()) -- case Histogram: -- h := metric.Snapshot() -- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- l.Printf(""histogram %s\n"", name) -- l.Printf("" count: %9d\n"", h.Count()) -- l.Printf("" min: %9d\n"", h.Min()) -- l.Printf("" max: %9d\n"", h.Max()) -- l.Printf("" mean: %12.2f\n"", h.Mean()) -- l.Printf("" stddev: %12.2f\n"", h.StdDev()) -- l.Printf("" median: %12.2f\n"", ps[0]) -- l.Printf("" 75%%: %12.2f\n"", ps[1]) -- l.Printf("" 95%%: %12.2f\n"", ps[2]) -- l.Printf("" 99%%: %12.2f\n"", ps[3]) -- l.Printf("" 99.9%%: %12.2f\n"", ps[4]) -- case Meter: -- m := metric.Snapshot() -- l.Printf(""meter %s\n"", name) -- l.Printf("" count: %9d\n"", m.Count()) -- l.Printf("" 1-min rate: %12.2f\n"", m.Rate1()) -- l.Printf("" 5-min rate: %12.2f\n"", m.Rate5()) -- l.Printf("" 15-min rate: %12.2f\n"", m.Rate15()) -- l.Printf("" mean rate: %12.2f\n"", m.RateMean()) -- case Timer: -- t := metric.Snapshot() -- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- l.Printf(""timer %s\n"", name) -- l.Printf("" count: %9d\n"", t.Count()) -- l.Printf("" min: %12.2f%s\n"", float64(t.Min())/du, duSuffix) -- l.Printf("" max: %12.2f%s\n"", float64(t.Max())/du, duSuffix) -- l.Printf("" mean: %12.2f%s\n"", t.Mean()/du, duSuffix) -- l.Printf("" stddev: %12.2f%s\n"", t.StdDev()/du, duSuffix) -- l.Printf("" median: %12.2f%s\n"", ps[0]/du, duSuffix) -- l.Printf("" 75%%: %12.2f%s\n"", ps[1]/du, duSuffix) -- l.Printf("" 95%%: %12.2f%s\n"", ps[2]/du, duSuffix) -- l.Printf("" 99%%: %12.2f%s\n"", ps[3]/du, duSuffix) -- l.Printf("" 99.9%%: %12.2f%s\n"", ps[4]/du, duSuffix) -- l.Printf("" 1-min rate: %12.2f\n"", t.Rate1()) -- l.Printf("" 5-min rate: %12.2f\n"", t.Rate5()) -- l.Printf("" 15-min rate: %12.2f\n"", t.Rate15()) -- l.Printf("" mean rate: %12.2f\n"", t.RateMean()) -- } -- }) -- } --} -diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md -deleted file mode 100644 -index 47454f54b6409..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/memory.md -+++ /dev/null -@@ -1,285 +0,0 @@ --Memory usage --============ -- --(Highly unscientific.) 
-- --Command used to gather static memory usage: -- --```sh --grep ^Vm ""/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"" --``` -- --Program used to gather baseline memory usage: -- --```go --package main -- --import ""time"" -- --func main() { -- time.Sleep(600e9) --} --``` -- --Baseline ---------- -- --``` --VmPeak: 42604 kB --VmSize: 42604 kB --VmLck: 0 kB --VmHWM: 1120 kB --VmRSS: 1120 kB --VmData: 35460 kB --VmStk: 136 kB --VmExe: 1020 kB --VmLib: 1848 kB --VmPTE: 36 kB --VmSwap: 0 kB --``` -- --Program used to gather metric memory usage (with other metrics being similar): -- --```go --package main -- --import ( -- ""fmt"" -- ""metrics"" -- ""time"" --) -- --func main() { -- fmt.Sprintf(""foo"") -- metrics.NewRegistry() -- time.Sleep(600e9) --} --``` -- --1000 counters registered -------------------------- -- --``` --VmPeak: 44016 kB --VmSize: 44016 kB --VmLck: 0 kB --VmHWM: 1928 kB --VmRSS: 1928 kB --VmData: 36868 kB --VmStk: 136 kB --VmExe: 1024 kB --VmLib: 1848 kB --VmPTE: 40 kB --VmSwap: 0 kB --``` -- --**1.412 kB virtual, TODO 0.808 kB resident per counter.** -- --100000 counters registered ---------------------------- -- --``` --VmPeak: 55024 kB --VmSize: 55024 kB --VmLck: 0 kB --VmHWM: 12440 kB --VmRSS: 12440 kB --VmData: 47876 kB --VmStk: 136 kB --VmExe: 1024 kB --VmLib: 1848 kB --VmPTE: 64 kB --VmSwap: 0 kB --``` -- --**0.1242 kB virtual, 0.1132 kB resident per counter.** -- --1000 gauges registered ------------------------ -- --``` --VmPeak: 44012 kB --VmSize: 44012 kB --VmLck: 0 kB --VmHWM: 1928 kB --VmRSS: 1928 kB --VmData: 36868 kB --VmStk: 136 kB --VmExe: 1020 kB --VmLib: 1848 kB --VmPTE: 40 kB --VmSwap: 0 kB --``` -- --**1.408 kB virtual, 0.808 kB resident per counter.** -- --100000 gauges registered -------------------------- -- --``` --VmPeak: 55020 kB --VmSize: 55020 kB --VmLck: 0 kB --VmHWM: 12432 kB --VmRSS: 12432 kB --VmData: 47876 kB --VmStk: 136 kB --VmExe: 1020 kB --VmLib: 1848 kB --VmPTE: 60 kB --VmSwap: 0 kB --``` -- --**0.12416 kB virtual, 0.11312 resident per gauge.** -- --1000 histograms with a uniform sample size of 1028 ---------------------------------------------------- -- --``` --VmPeak: 72272 kB --VmSize: 72272 kB --VmLck: 0 kB --VmHWM: 16204 kB --VmRSS: 16204 kB --VmData: 65100 kB --VmStk: 136 kB --VmExe: 1048 kB --VmLib: 1848 kB --VmPTE: 80 kB --VmSwap: 0 kB --``` -- --**29.668 kB virtual, TODO 15.084 resident per histogram.** -- --10000 histograms with a uniform sample size of 1028 ----------------------------------------------------- -- --``` --VmPeak: 256912 kB --VmSize: 256912 kB --VmLck: 0 kB --VmHWM: 146204 kB --VmRSS: 146204 kB --VmData: 249740 kB --VmStk: 136 kB --VmExe: 1048 kB --VmLib: 1848 kB --VmPTE: 448 kB --VmSwap: 0 kB --``` -- --**21.4308 kB virtual, 14.5084 kB resident per histogram.** -- --50000 histograms with a uniform sample size of 1028 ----------------------------------------------------- -- --``` --VmPeak: 908112 kB --VmSize: 908112 kB --VmLck: 0 kB --VmHWM: 645832 kB --VmRSS: 645588 kB --VmData: 900940 kB --VmStk: 136 kB --VmExe: 1048 kB --VmLib: 1848 kB --VmPTE: 1716 kB --VmSwap: 1544 kB --``` -- --**17.31016 kB virtual, 12.88936 kB resident per histogram.** -- --1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- -- --``` --VmPeak: 62480 kB --VmSize: 62480 kB --VmLck: 0 kB --VmHWM: 11572 kB --VmRSS: 11572 kB --VmData: 55308 kB --VmStk: 136 kB --VmExe: 1048 kB --VmLib: 1848 kB --VmPTE: 
64 kB --VmSwap: 0 kB --``` -- --**19.876 kB virtual, 10.452 kB resident per histogram.** -- --10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 ---------------------------------------------------------------------------------------- -- --``` --VmPeak: 153296 kB --VmSize: 153296 kB --VmLck: 0 kB --VmHWM: 101176 kB --VmRSS: 101176 kB --VmData: 146124 kB --VmStk: 136 kB --VmExe: 1048 kB --VmLib: 1848 kB --VmPTE: 240 kB --VmSwap: 0 kB --``` -- --**11.0692 kB virtual, 10.0056 kB resident per histogram.** -- --50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 ---------------------------------------------------------------------------------------- -- --``` --VmPeak: 557264 kB --VmSize: 557264 kB --VmLck: 0 kB --VmHWM: 501056 kB --VmRSS: 501056 kB --VmData: 550092 kB --VmStk: 136 kB --VmExe: 1048 kB --VmLib: 1848 kB --VmPTE: 1032 kB --VmSwap: 0 kB --``` -- --**10.2932 kB virtual, 9.99872 kB resident per histogram.** -- --1000 meters ------------- -- --``` --VmPeak: 74504 kB --VmSize: 74504 kB --VmLck: 0 kB --VmHWM: 24124 kB --VmRSS: 24124 kB --VmData: 67340 kB --VmStk: 136 kB --VmExe: 1040 kB --VmLib: 1848 kB --VmPTE: 92 kB --VmSwap: 0 kB --``` -- --**31.9 kB virtual, 23.004 kB resident per meter.** -- --10000 meters -------------- -- --``` --VmPeak: 278920 kB --VmSize: 278920 kB --VmLck: 0 kB --VmHWM: 227300 kB --VmRSS: 227300 kB --VmData: 271756 kB --VmStk: 136 kB --VmExe: 1040 kB --VmLib: 1848 kB --VmPTE: 488 kB --VmSwap: 0 kB --``` -- --**23.6316 kB virtual, 22.618 kB resident per meter.** -diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go -deleted file mode 100644 -index 7807406a3be5b..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/meter.go -+++ /dev/null -@@ -1,257 +0,0 @@ --package metrics -- --import ( -- ""math"" -- ""sync"" -- ""sync/atomic"" -- ""time"" --) -- --// Meters count events to produce exponentially-weighted moving average rates --// at one-, five-, and fifteen-minutes and a mean rate. --type Meter interface { -- Count() int64 -- Mark(int64) -- Rate1() float64 -- Rate5() float64 -- Rate15() float64 -- RateMean() float64 -- Snapshot() Meter -- Stop() --} -- --// GetOrRegisterMeter returns an existing Meter or constructs and registers a --// new StandardMeter. --// Be sure to unregister the meter from the registry once it is of no use to --// allow for garbage collection. --func GetOrRegisterMeter(name string, r Registry) Meter { -- if nil == r { -- r = DefaultRegistry -- } -- return r.GetOrRegister(name, NewMeter).(Meter) --} -- --// NewMeter constructs a new StandardMeter and launches a goroutine. --// Be sure to call Stop() once the meter is of no use to allow for garbage collection. --func NewMeter() Meter { -- if UseNilMetrics { -- return NilMeter{} -- } -- m := newStandardMeter() -- arbiter.Lock() -- defer arbiter.Unlock() -- arbiter.meters[m] = struct{}{} -- if !arbiter.started { -- arbiter.started = true -- go arbiter.tick() -- } -- return m --} -- --// NewMeter constructs and registers a new StandardMeter and launches a --// goroutine. --// Be sure to unregister the meter from the registry once it is of no use to --// allow for garbage collection. --func NewRegisteredMeter(name string, r Registry) Meter { -- c := NewMeter() -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// MeterSnapshot is a read-only copy of another Meter. 
--type MeterSnapshot struct { -- count int64 -- rate1, rate5, rate15, rateMean uint64 --} -- --// Count returns the count of events at the time the snapshot was taken. --func (m *MeterSnapshot) Count() int64 { return m.count } -- --// Mark panics. --func (*MeterSnapshot) Mark(n int64) { -- panic(""Mark called on a MeterSnapshot"") --} -- --// Rate1 returns the one-minute moving average rate of events per second at the --// time the snapshot was taken. --func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) } -- --// Rate5 returns the five-minute moving average rate of events per second at --// the time the snapshot was taken. --func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) } -- --// Rate15 returns the fifteen-minute moving average rate of events per second --// at the time the snapshot was taken. --func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) } -- --// RateMean returns the meter's mean rate of events per second at the time the --// snapshot was taken. --func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) } -- --// Snapshot returns the snapshot. --func (m *MeterSnapshot) Snapshot() Meter { return m } -- --// Stop is a no-op. --func (m *MeterSnapshot) Stop() {} -- --// NilMeter is a no-op Meter. --type NilMeter struct{} -- --// Count is a no-op. --func (NilMeter) Count() int64 { return 0 } -- --// Mark is a no-op. --func (NilMeter) Mark(n int64) {} -- --// Rate1 is a no-op. --func (NilMeter) Rate1() float64 { return 0.0 } -- --// Rate5 is a no-op. --func (NilMeter) Rate5() float64 { return 0.0 } -- --// Rate15is a no-op. --func (NilMeter) Rate15() float64 { return 0.0 } -- --// RateMean is a no-op. --func (NilMeter) RateMean() float64 { return 0.0 } -- --// Snapshot is a no-op. --func (NilMeter) Snapshot() Meter { return NilMeter{} } -- --// Stop is a no-op. --func (NilMeter) Stop() {} -- --// StandardMeter is the standard implementation of a Meter. --type StandardMeter struct { -- // Only used on stop. -- lock sync.Mutex -- snapshot *MeterSnapshot -- a1, a5, a15 EWMA -- startTime time.Time -- stopped uint32 --} -- --func newStandardMeter() *StandardMeter { -- return &StandardMeter{ -- snapshot: &MeterSnapshot{}, -- a1: NewEWMA1(), -- a5: NewEWMA5(), -- a15: NewEWMA15(), -- startTime: time.Now(), -- } --} -- --// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. --func (m *StandardMeter) Stop() { -- m.lock.Lock() -- stopped := m.stopped -- m.stopped = 1 -- m.lock.Unlock() -- if stopped != 1 { -- arbiter.Lock() -- delete(arbiter.meters, m) -- arbiter.Unlock() -- } --} -- --// Count returns the number of events recorded. --func (m *StandardMeter) Count() int64 { -- return atomic.LoadInt64(&m.snapshot.count) --} -- --// Mark records the occurance of n events. --func (m *StandardMeter) Mark(n int64) { -- if atomic.LoadUint32(&m.stopped) == 1 { -- return -- } -- -- atomic.AddInt64(&m.snapshot.count, n) -- -- m.a1.Update(n) -- m.a5.Update(n) -- m.a15.Update(n) -- m.updateSnapshot() --} -- --// Rate1 returns the one-minute moving average rate of events per second. --func (m *StandardMeter) Rate1() float64 { -- return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1)) --} -- --// Rate5 returns the five-minute moving average rate of events per second. 
--func (m *StandardMeter) Rate5() float64 { -- return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5)) --} -- --// Rate15 returns the fifteen-minute moving average rate of events per second. --func (m *StandardMeter) Rate15() float64 { -- return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15)) --} -- --// RateMean returns the meter's mean rate of events per second. --func (m *StandardMeter) RateMean() float64 { -- return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean)) --} -- --// Snapshot returns a read-only copy of the meter. --func (m *StandardMeter) Snapshot() Meter { -- copiedSnapshot := MeterSnapshot{ -- count: atomic.LoadInt64(&m.snapshot.count), -- rate1: atomic.LoadUint64(&m.snapshot.rate1), -- rate5: atomic.LoadUint64(&m.snapshot.rate5), -- rate15: atomic.LoadUint64(&m.snapshot.rate15), -- rateMean: atomic.LoadUint64(&m.snapshot.rateMean), -- } -- return &copiedSnapshot --} -- --func (m *StandardMeter) updateSnapshot() { -- rate1 := math.Float64bits(m.a1.Rate()) -- rate5 := math.Float64bits(m.a5.Rate()) -- rate15 := math.Float64bits(m.a15.Rate()) -- rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds()) -- -- atomic.StoreUint64(&m.snapshot.rate1, rate1) -- atomic.StoreUint64(&m.snapshot.rate5, rate5) -- atomic.StoreUint64(&m.snapshot.rate15, rate15) -- atomic.StoreUint64(&m.snapshot.rateMean, rateMean) --} -- --func (m *StandardMeter) tick() { -- m.a1.Tick() -- m.a5.Tick() -- m.a15.Tick() -- m.updateSnapshot() --} -- --// meterArbiter ticks meters every 5s from a single goroutine. --// meters are references in a set for future stopping. --type meterArbiter struct { -- sync.RWMutex -- started bool -- meters map[*StandardMeter]struct{} -- ticker *time.Ticker --} -- --var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} -- --// Ticks meters on the scheduled interval --func (ma *meterArbiter) tick() { -- for { -- select { -- case <-ma.ticker.C: -- ma.tickMeters() -- } -- } --} -- --func (ma *meterArbiter) tickMeters() { -- ma.RLock() -- defer ma.RUnlock() -- for meter := range ma.meters { -- meter.tick() -- } --} -diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go -deleted file mode 100644 -index b97a49ed123ee..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/metrics.go -+++ /dev/null -@@ -1,13 +0,0 @@ --// Go port of Coda Hale's Metrics library --// --// --// --// Coda Hale's original work: --package metrics -- --// UseNilMetrics is checked by the constructor functions for all of the --// standard metrics. If it is true, the metric returned is a stub. --// --// This global kill-switch helps quantify the observer effect and makes --// for less cluttered pprof profiles. 
--var UseNilMetrics bool = false -diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go -deleted file mode 100644 -index 266b6c93d21de..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/opentsdb.go -+++ /dev/null -@@ -1,119 +0,0 @@ --package metrics -- --import ( -- ""bufio"" -- ""fmt"" -- ""log"" -- ""net"" -- ""os"" -- ""strings"" -- ""time"" --) -- --var shortHostName string = """" -- --// OpenTSDBConfig provides a container with configuration parameters for --// the OpenTSDB exporter --type OpenTSDBConfig struct { -- Addr *net.TCPAddr // Network address to connect to -- Registry Registry // Registry to be exported -- FlushInterval time.Duration // Flush interval -- DurationUnit time.Duration // Time conversion unit for durations -- Prefix string // Prefix to be prepended to metric names --} -- --// OpenTSDB is a blocking exporter function which reports metrics in r --// to a TSDB server located at addr, flushing them every d duration --// and prepending metric names with prefix. --func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { -- OpenTSDBWithConfig(OpenTSDBConfig{ -- Addr: addr, -- Registry: r, -- FlushInterval: d, -- DurationUnit: time.Nanosecond, -- Prefix: prefix, -- }) --} -- --// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, --// but it takes a OpenTSDBConfig instead. --func OpenTSDBWithConfig(c OpenTSDBConfig) { -- for _ = range time.Tick(c.FlushInterval) { -- if err := openTSDB(&c); nil != err { -- log.Println(err) -- } -- } --} -- --func getShortHostname() string { -- if shortHostName == """" { -- host, _ := os.Hostname() -- if index := strings.Index(host, "".""); index > 0 { -- shortHostName = host[:index] -- } else { -- shortHostName = host -- } -- } -- return shortHostName --} -- --func openTSDB(c *OpenTSDBConfig) error { -- shortHostname := getShortHostname() -- now := time.Now().Unix() -- du := float64(c.DurationUnit) -- conn, err := net.DialTCP(""tcp"", nil, c.Addr) -- if nil != err { -- return err -- } -- defer conn.Close() -- w := bufio.NewWriter(conn) -- c.Registry.Each(func(name string, i interface{}) { -- switch metric := i.(type) { -- case Counter: -- fmt.Fprintf(w, ""put %s.%s.count %d %d host=%s\n"", c.Prefix, name, now, metric.Count(), shortHostname) -- case Gauge: -- fmt.Fprintf(w, ""put %s.%s.value %d %d host=%s\n"", c.Prefix, name, now, metric.Value(), shortHostname) -- case GaugeFloat64: -- fmt.Fprintf(w, ""put %s.%s.value %d %f host=%s\n"", c.Prefix, name, now, metric.Value(), shortHostname) -- case Histogram: -- h := metric.Snapshot() -- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- fmt.Fprintf(w, ""put %s.%s.count %d %d host=%s\n"", c.Prefix, name, now, h.Count(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.min %d %d host=%s\n"", c.Prefix, name, now, h.Min(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.max %d %d host=%s\n"", c.Prefix, name, now, h.Max(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.mean %d %.2f host=%s\n"", c.Prefix, name, now, h.Mean(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.std-dev %d %.2f host=%s\n"", c.Prefix, name, now, h.StdDev(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.50-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[0], shortHostname) -- fmt.Fprintf(w, ""put %s.%s.75-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[1], shortHostname) -- fmt.Fprintf(w, ""put %s.%s.95-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[2], shortHostname) -- fmt.Fprintf(w, 
""put %s.%s.99-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[3], shortHostname) -- fmt.Fprintf(w, ""put %s.%s.999-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[4], shortHostname) -- case Meter: -- m := metric.Snapshot() -- fmt.Fprintf(w, ""put %s.%s.count %d %d host=%s\n"", c.Prefix, name, now, m.Count(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.one-minute %d %.2f host=%s\n"", c.Prefix, name, now, m.Rate1(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.five-minute %d %.2f host=%s\n"", c.Prefix, name, now, m.Rate5(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.fifteen-minute %d %.2f host=%s\n"", c.Prefix, name, now, m.Rate15(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.mean %d %.2f host=%s\n"", c.Prefix, name, now, m.RateMean(), shortHostname) -- case Timer: -- t := metric.Snapshot() -- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- fmt.Fprintf(w, ""put %s.%s.count %d %d host=%s\n"", c.Prefix, name, now, t.Count(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.min %d %d host=%s\n"", c.Prefix, name, now, t.Min()/int64(du), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.max %d %d host=%s\n"", c.Prefix, name, now, t.Max()/int64(du), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.mean %d %.2f host=%s\n"", c.Prefix, name, now, t.Mean()/du, shortHostname) -- fmt.Fprintf(w, ""put %s.%s.std-dev %d %.2f host=%s\n"", c.Prefix, name, now, t.StdDev()/du, shortHostname) -- fmt.Fprintf(w, ""put %s.%s.50-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[0]/du, shortHostname) -- fmt.Fprintf(w, ""put %s.%s.75-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[1]/du, shortHostname) -- fmt.Fprintf(w, ""put %s.%s.95-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[2]/du, shortHostname) -- fmt.Fprintf(w, ""put %s.%s.99-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[3]/du, shortHostname) -- fmt.Fprintf(w, ""put %s.%s.999-percentile %d %.2f host=%s\n"", c.Prefix, name, now, ps[4]/du, shortHostname) -- fmt.Fprintf(w, ""put %s.%s.one-minute %d %.2f host=%s\n"", c.Prefix, name, now, t.Rate1(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.five-minute %d %.2f host=%s\n"", c.Prefix, name, now, t.Rate5(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.fifteen-minute %d %.2f host=%s\n"", c.Prefix, name, now, t.Rate15(), shortHostname) -- fmt.Fprintf(w, ""put %s.%s.mean-rate %d %.2f host=%s\n"", c.Prefix, name, now, t.RateMean(), shortHostname) -- } -- w.Flush() -- }) -- return nil --} -diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go -deleted file mode 100644 -index b3bab64e15b38..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/registry.go -+++ /dev/null -@@ -1,363 +0,0 @@ --package metrics -- --import ( -- ""fmt"" -- ""reflect"" -- ""strings"" -- ""sync"" --) -- --// DuplicateMetric is the error returned by Registry.Register when a metric --// already exists. If you mean to Register that metric you must first --// Unregister the existing metric. --type DuplicateMetric string -- --func (err DuplicateMetric) Error() string { -- return fmt.Sprintf(""duplicate metric: %s"", string(err)) --} -- --// A Registry holds references to a set of metrics by name and can iterate --// over them, calling callback functions provided by the user. --// --// This is an interface so as to encourage other structs to implement --// the Registry API as appropriate. --type Registry interface { -- -- // Call the given function for each registered metric. 
-- Each(func(string, interface{})) -- -- // Get the metric by the given name or nil if none is registered. -- Get(string) interface{} -- -- // GetAll metrics in the Registry. -- GetAll() map[string]map[string]interface{} -- -- // Gets an existing metric or registers the given one. -- // The interface can be the metric to register if not found in registry, -- // or a function returning the metric for lazy instantiation. -- GetOrRegister(string, interface{}) interface{} -- -- // Register the given metric under the given name. -- Register(string, interface{}) error -- -- // Run all registered healthchecks. -- RunHealthchecks() -- -- // Unregister the metric with the given name. -- Unregister(string) -- -- // Unregister all metrics. (Mostly for testing.) -- UnregisterAll() --} -- --// The standard implementation of a Registry is a mutex-protected map --// of names to metrics. --type StandardRegistry struct { -- metrics map[string]interface{} -- mutex sync.RWMutex --} -- --// Create a new registry. --func NewRegistry() Registry { -- return &StandardRegistry{metrics: make(map[string]interface{})} --} -- --// Call the given function for each registered metric. --func (r *StandardRegistry) Each(f func(string, interface{})) { -- for name, i := range r.registered() { -- f(name, i) -- } --} -- --// Get the metric by the given name or nil if none is registered. --func (r *StandardRegistry) Get(name string) interface{} { -- r.mutex.RLock() -- defer r.mutex.RUnlock() -- return r.metrics[name] --} -- --// Gets an existing metric or creates and registers a new one. Threadsafe --// alternative to calling Get and Register on failure. --// The interface can be the metric to register if not found in registry, --// or a function returning the metric for lazy instantiation. --func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { -- // access the read lock first which should be re-entrant -- r.mutex.RLock() -- metric, ok := r.metrics[name] -- r.mutex.RUnlock() -- if ok { -- return metric -- } -- -- // only take the write lock if we'll be modifying the metrics map -- r.mutex.Lock() -- defer r.mutex.Unlock() -- if metric, ok := r.metrics[name]; ok { -- return metric -- } -- if v := reflect.ValueOf(i); v.Kind() == reflect.Func { -- i = v.Call(nil)[0].Interface() -- } -- r.register(name, i) -- return i --} -- --// Register the given metric under the given name. Returns a DuplicateMetric --// if a metric by the given name is already registered. --func (r *StandardRegistry) Register(name string, i interface{}) error { -- r.mutex.Lock() -- defer r.mutex.Unlock() -- return r.register(name, i) --} -- --// Run all registered healthchecks. 
--func (r *StandardRegistry) RunHealthchecks() { -- r.mutex.RLock() -- defer r.mutex.RUnlock() -- for _, i := range r.metrics { -- if h, ok := i.(Healthcheck); ok { -- h.Check() -- } -- } --} -- --// GetAll metrics in the Registry --func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { -- data := make(map[string]map[string]interface{}) -- r.Each(func(name string, i interface{}) { -- values := make(map[string]interface{}) -- switch metric := i.(type) { -- case Counter: -- values[""count""] = metric.Count() -- case Gauge: -- values[""value""] = metric.Value() -- case GaugeFloat64: -- values[""value""] = metric.Value() -- case Healthcheck: -- values[""error""] = nil -- metric.Check() -- if err := metric.Error(); nil != err { -- values[""error""] = metric.Error().Error() -- } -- case Histogram: -- h := metric.Snapshot() -- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- values[""count""] = h.Count() -- values[""min""] = h.Min() -- values[""max""] = h.Max() -- values[""mean""] = h.Mean() -- values[""stddev""] = h.StdDev() -- values[""median""] = ps[0] -- values[""75%""] = ps[1] -- values[""95%""] = ps[2] -- values[""99%""] = ps[3] -- values[""99.9%""] = ps[4] -- case Meter: -- m := metric.Snapshot() -- values[""count""] = m.Count() -- values[""1m.rate""] = m.Rate1() -- values[""5m.rate""] = m.Rate5() -- values[""15m.rate""] = m.Rate15() -- values[""mean.rate""] = m.RateMean() -- case Timer: -- t := metric.Snapshot() -- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- values[""count""] = t.Count() -- values[""min""] = t.Min() -- values[""max""] = t.Max() -- values[""mean""] = t.Mean() -- values[""stddev""] = t.StdDev() -- values[""median""] = ps[0] -- values[""75%""] = ps[1] -- values[""95%""] = ps[2] -- values[""99%""] = ps[3] -- values[""99.9%""] = ps[4] -- values[""1m.rate""] = t.Rate1() -- values[""5m.rate""] = t.Rate5() -- values[""15m.rate""] = t.Rate15() -- values[""mean.rate""] = t.RateMean() -- } -- data[name] = values -- }) -- return data --} -- --// Unregister the metric with the given name. --func (r *StandardRegistry) Unregister(name string) { -- r.mutex.Lock() -- defer r.mutex.Unlock() -- r.stop(name) -- delete(r.metrics, name) --} -- --// Unregister all metrics. (Mostly for testing.) --func (r *StandardRegistry) UnregisterAll() { -- r.mutex.Lock() -- defer r.mutex.Unlock() -- for name, _ := range r.metrics { -- r.stop(name) -- delete(r.metrics, name) -- } --} -- --func (r *StandardRegistry) register(name string, i interface{}) error { -- if _, ok := r.metrics[name]; ok { -- return DuplicateMetric(name) -- } -- switch i.(type) { -- case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: -- r.metrics[name] = i -- } -- return nil --} -- --func (r *StandardRegistry) registered() map[string]interface{} { -- r.mutex.Lock() -- defer r.mutex.Unlock() -- metrics := make(map[string]interface{}, len(r.metrics)) -- for name, i := range r.metrics { -- metrics[name] = i -- } -- return metrics --} -- --func (r *StandardRegistry) stop(name string) { -- if i, ok := r.metrics[name]; ok { -- if s, ok := i.(Stoppable); ok { -- s.Stop() -- } -- } --} -- --// Stoppable defines the metrics which has to be stopped. 
--type Stoppable interface { -- Stop() --} -- --type PrefixedRegistry struct { -- underlying Registry -- prefix string --} -- --func NewPrefixedRegistry(prefix string) Registry { -- return &PrefixedRegistry{ -- underlying: NewRegistry(), -- prefix: prefix, -- } --} -- --func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { -- return &PrefixedRegistry{ -- underlying: parent, -- prefix: prefix, -- } --} -- --// Call the given function for each registered metric. --func (r *PrefixedRegistry) Each(fn func(string, interface{})) { -- wrappedFn := func(prefix string) func(string, interface{}) { -- return func(name string, iface interface{}) { -- if strings.HasPrefix(name, prefix) { -- fn(name, iface) -- } else { -- return -- } -- } -- } -- -- baseRegistry, prefix := findPrefix(r, """") -- baseRegistry.Each(wrappedFn(prefix)) --} -- --func findPrefix(registry Registry, prefix string) (Registry, string) { -- switch r := registry.(type) { -- case *PrefixedRegistry: -- return findPrefix(r.underlying, r.prefix+prefix) -- case *StandardRegistry: -- return r, prefix -- } -- return nil, """" --} -- --// Get the metric by the given name or nil if none is registered. --func (r *PrefixedRegistry) Get(name string) interface{} { -- realName := r.prefix + name -- return r.underlying.Get(realName) --} -- --// Gets an existing metric or registers the given one. --// The interface can be the metric to register if not found in registry, --// or a function returning the metric for lazy instantiation. --func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { -- realName := r.prefix + name -- return r.underlying.GetOrRegister(realName, metric) --} -- --// Register the given metric under the given name. The name will be prefixed. --func (r *PrefixedRegistry) Register(name string, metric interface{}) error { -- realName := r.prefix + name -- return r.underlying.Register(realName, metric) --} -- --// Run all registered healthchecks. --func (r *PrefixedRegistry) RunHealthchecks() { -- r.underlying.RunHealthchecks() --} -- --// GetAll metrics in the Registry --func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { -- return r.underlying.GetAll() --} -- --// Unregister the metric with the given name. The name will be prefixed. --func (r *PrefixedRegistry) Unregister(name string) { -- realName := r.prefix + name -- r.underlying.Unregister(realName) --} -- --// Unregister all metrics. (Mostly for testing.) --func (r *PrefixedRegistry) UnregisterAll() { -- r.underlying.UnregisterAll() --} -- --var DefaultRegistry Registry = NewRegistry() -- --// Call the given function for each registered metric. --func Each(f func(string, interface{})) { -- DefaultRegistry.Each(f) --} -- --// Get the metric by the given name or nil if none is registered. --func Get(name string) interface{} { -- return DefaultRegistry.Get(name) --} -- --// Gets an existing metric or creates and registers a new one. Threadsafe --// alternative to calling Get and Register on failure. --func GetOrRegister(name string, i interface{}) interface{} { -- return DefaultRegistry.GetOrRegister(name, i) --} -- --// Register the given metric under the given name. Returns a DuplicateMetric --// if a metric by the given name is already registered. --func Register(name string, i interface{}) error { -- return DefaultRegistry.Register(name, i) --} -- --// Register the given metric under the given name. Panics if a metric by the --// given name is already registered. 
--func MustRegister(name string, i interface{}) { -- if err := Register(name, i); err != nil { -- panic(err) -- } --} -- --// Run all registered healthchecks. --func RunHealthchecks() { -- DefaultRegistry.RunHealthchecks() --} -- --// Unregister the metric with the given name. --func Unregister(name string) { -- DefaultRegistry.Unregister(name) --} -diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go -deleted file mode 100644 -index 11c6b785a0f8f..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/runtime.go -+++ /dev/null -@@ -1,212 +0,0 @@ --package metrics -- --import ( -- ""runtime"" -- ""runtime/pprof"" -- ""time"" --) -- --var ( -- memStats runtime.MemStats -- runtimeMetrics struct { -- MemStats struct { -- Alloc Gauge -- BuckHashSys Gauge -- DebugGC Gauge -- EnableGC Gauge -- Frees Gauge -- HeapAlloc Gauge -- HeapIdle Gauge -- HeapInuse Gauge -- HeapObjects Gauge -- HeapReleased Gauge -- HeapSys Gauge -- LastGC Gauge -- Lookups Gauge -- Mallocs Gauge -- MCacheInuse Gauge -- MCacheSys Gauge -- MSpanInuse Gauge -- MSpanSys Gauge -- NextGC Gauge -- NumGC Gauge -- GCCPUFraction GaugeFloat64 -- PauseNs Histogram -- PauseTotalNs Gauge -- StackInuse Gauge -- StackSys Gauge -- Sys Gauge -- TotalAlloc Gauge -- } -- NumCgoCall Gauge -- NumGoroutine Gauge -- NumThread Gauge -- ReadMemStats Timer -- } -- frees uint64 -- lookups uint64 -- mallocs uint64 -- numGC uint32 -- numCgoCalls int64 -- -- threadCreateProfile = pprof.Lookup(""threadcreate"") --) -- --// Capture new values for the Go runtime statistics exported in --// runtime.MemStats. This is designed to be called as a goroutine. --func CaptureRuntimeMemStats(r Registry, d time.Duration) { -- for _ = range time.Tick(d) { -- CaptureRuntimeMemStatsOnce(r) -- } --} -- --// Capture new values for the Go runtime statistics exported in --// runtime.MemStats. This is designed to be called in a background --// goroutine. Giving a registry which has not been given to --// RegisterRuntimeMemStats will panic. --// --// Be very careful with this because runtime.ReadMemStats calls the C --// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() --// and that last one does what it says on the tin. --func CaptureRuntimeMemStatsOnce(r Registry) { -- t := time.Now() -- runtime.ReadMemStats(&memStats) // This takes 50-200us. 
-- runtimeMetrics.ReadMemStats.UpdateSince(t) -- -- runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) -- runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) -- if memStats.DebugGC { -- runtimeMetrics.MemStats.DebugGC.Update(1) -- } else { -- runtimeMetrics.MemStats.DebugGC.Update(0) -- } -- if memStats.EnableGC { -- runtimeMetrics.MemStats.EnableGC.Update(1) -- } else { -- runtimeMetrics.MemStats.EnableGC.Update(0) -- } -- -- runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) -- runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) -- runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) -- runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) -- runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) -- runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) -- runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) -- runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) -- runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) -- runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) -- runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) -- runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) -- runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) -- runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) -- runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) -- runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) -- runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) -- -- // -- i := numGC % uint32(len(memStats.PauseNs)) -- ii := memStats.NumGC % uint32(len(memStats.PauseNs)) -- if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { -- for i = 0; i < uint32(len(memStats.PauseNs)); i++ { -- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) -- } -- } else { -- if i > ii { -- for ; i < uint32(len(memStats.PauseNs)); i++ { -- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) -- } -- i = 0 -- } -- for ; i < ii; i++ { -- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) -- } -- } -- frees = memStats.Frees -- lookups = memStats.Lookups -- mallocs = memStats.Mallocs -- numGC = memStats.NumGC -- -- runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) -- runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) -- runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) -- runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) -- runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) -- -- currentNumCgoCalls := numCgoCall() -- runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) -- numCgoCalls = currentNumCgoCalls -- -- runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) -- -- runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) --} -- --// Register runtimeMetrics for the Go runtime statistics exported in runtime and --// specifically runtime.MemStats. The runtimeMetrics are named by their --// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
--func RegisterRuntimeMemStats(r Registry) { -- runtimeMetrics.MemStats.Alloc = NewGauge() -- runtimeMetrics.MemStats.BuckHashSys = NewGauge() -- runtimeMetrics.MemStats.DebugGC = NewGauge() -- runtimeMetrics.MemStats.EnableGC = NewGauge() -- runtimeMetrics.MemStats.Frees = NewGauge() -- runtimeMetrics.MemStats.HeapAlloc = NewGauge() -- runtimeMetrics.MemStats.HeapIdle = NewGauge() -- runtimeMetrics.MemStats.HeapInuse = NewGauge() -- runtimeMetrics.MemStats.HeapObjects = NewGauge() -- runtimeMetrics.MemStats.HeapReleased = NewGauge() -- runtimeMetrics.MemStats.HeapSys = NewGauge() -- runtimeMetrics.MemStats.LastGC = NewGauge() -- runtimeMetrics.MemStats.Lookups = NewGauge() -- runtimeMetrics.MemStats.Mallocs = NewGauge() -- runtimeMetrics.MemStats.MCacheInuse = NewGauge() -- runtimeMetrics.MemStats.MCacheSys = NewGauge() -- runtimeMetrics.MemStats.MSpanInuse = NewGauge() -- runtimeMetrics.MemStats.MSpanSys = NewGauge() -- runtimeMetrics.MemStats.NextGC = NewGauge() -- runtimeMetrics.MemStats.NumGC = NewGauge() -- runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() -- runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) -- runtimeMetrics.MemStats.PauseTotalNs = NewGauge() -- runtimeMetrics.MemStats.StackInuse = NewGauge() -- runtimeMetrics.MemStats.StackSys = NewGauge() -- runtimeMetrics.MemStats.Sys = NewGauge() -- runtimeMetrics.MemStats.TotalAlloc = NewGauge() -- runtimeMetrics.NumCgoCall = NewGauge() -- runtimeMetrics.NumGoroutine = NewGauge() -- runtimeMetrics.NumThread = NewGauge() -- runtimeMetrics.ReadMemStats = NewTimer() -- -- r.Register(""runtime.MemStats.Alloc"", runtimeMetrics.MemStats.Alloc) -- r.Register(""runtime.MemStats.BuckHashSys"", runtimeMetrics.MemStats.BuckHashSys) -- r.Register(""runtime.MemStats.DebugGC"", runtimeMetrics.MemStats.DebugGC) -- r.Register(""runtime.MemStats.EnableGC"", runtimeMetrics.MemStats.EnableGC) -- r.Register(""runtime.MemStats.Frees"", runtimeMetrics.MemStats.Frees) -- r.Register(""runtime.MemStats.HeapAlloc"", runtimeMetrics.MemStats.HeapAlloc) -- r.Register(""runtime.MemStats.HeapIdle"", runtimeMetrics.MemStats.HeapIdle) -- r.Register(""runtime.MemStats.HeapInuse"", runtimeMetrics.MemStats.HeapInuse) -- r.Register(""runtime.MemStats.HeapObjects"", runtimeMetrics.MemStats.HeapObjects) -- r.Register(""runtime.MemStats.HeapReleased"", runtimeMetrics.MemStats.HeapReleased) -- r.Register(""runtime.MemStats.HeapSys"", runtimeMetrics.MemStats.HeapSys) -- r.Register(""runtime.MemStats.LastGC"", runtimeMetrics.MemStats.LastGC) -- r.Register(""runtime.MemStats.Lookups"", runtimeMetrics.MemStats.Lookups) -- r.Register(""runtime.MemStats.Mallocs"", runtimeMetrics.MemStats.Mallocs) -- r.Register(""runtime.MemStats.MCacheInuse"", runtimeMetrics.MemStats.MCacheInuse) -- r.Register(""runtime.MemStats.MCacheSys"", runtimeMetrics.MemStats.MCacheSys) -- r.Register(""runtime.MemStats.MSpanInuse"", runtimeMetrics.MemStats.MSpanInuse) -- r.Register(""runtime.MemStats.MSpanSys"", runtimeMetrics.MemStats.MSpanSys) -- r.Register(""runtime.MemStats.NextGC"", runtimeMetrics.MemStats.NextGC) -- r.Register(""runtime.MemStats.NumGC"", runtimeMetrics.MemStats.NumGC) -- r.Register(""runtime.MemStats.GCCPUFraction"", runtimeMetrics.MemStats.GCCPUFraction) -- r.Register(""runtime.MemStats.PauseNs"", runtimeMetrics.MemStats.PauseNs) -- r.Register(""runtime.MemStats.PauseTotalNs"", runtimeMetrics.MemStats.PauseTotalNs) -- r.Register(""runtime.MemStats.StackInuse"", runtimeMetrics.MemStats.StackInuse) -- r.Register(""runtime.MemStats.StackSys"", 
runtimeMetrics.MemStats.StackSys) -- r.Register(""runtime.MemStats.Sys"", runtimeMetrics.MemStats.Sys) -- r.Register(""runtime.MemStats.TotalAlloc"", runtimeMetrics.MemStats.TotalAlloc) -- r.Register(""runtime.NumCgoCall"", runtimeMetrics.NumCgoCall) -- r.Register(""runtime.NumGoroutine"", runtimeMetrics.NumGoroutine) -- r.Register(""runtime.NumThread"", runtimeMetrics.NumThread) -- r.Register(""runtime.ReadMemStats"", runtimeMetrics.ReadMemStats) --} -diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go -deleted file mode 100644 -index e3391f4e89fa1..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go -+++ /dev/null -@@ -1,10 +0,0 @@ --// +build cgo --// +build !appengine -- --package metrics -- --import ""runtime"" -- --func numCgoCall() int64 { -- return runtime.NumCgoCall() --} -diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go -deleted file mode 100644 -index ca12c05bac740..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go -+++ /dev/null -@@ -1,9 +0,0 @@ --// +build go1.5 -- --package metrics -- --import ""runtime"" -- --func gcCPUFraction(memStats *runtime.MemStats) float64 { -- return memStats.GCCPUFraction --} -diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go -deleted file mode 100644 -index 616a3b4751be1..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go -+++ /dev/null -@@ -1,7 +0,0 @@ --// +build !cgo appengine -- --package metrics -- --func numCgoCall() int64 { -- return 0 --} -diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go -deleted file mode 100644 -index be96aa6f1be9e..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go -+++ /dev/null -@@ -1,9 +0,0 @@ --// +build !go1.5 -- --package metrics -- --import ""runtime"" -- --func gcCPUFraction(memStats *runtime.MemStats) float64 { -- return 0 --} -diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go -deleted file mode 100644 -index fecee5ef68ba7..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/sample.go -+++ /dev/null -@@ -1,616 +0,0 @@ --package metrics -- --import ( -- ""math"" -- ""math/rand"" -- ""sort"" -- ""sync"" -- ""time"" --) -- --const rescaleThreshold = time.Hour -- --// Samples maintain a statistically-significant selection of values from --// a stream. --type Sample interface { -- Clear() -- Count() int64 -- Max() int64 -- Mean() float64 -- Min() int64 -- Percentile(float64) float64 -- Percentiles([]float64) []float64 -- Size() int -- Snapshot() Sample -- StdDev() float64 -- Sum() int64 -- Update(int64) -- Values() []int64 -- Variance() float64 --} -- --// ExpDecaySample is an exponentially-decaying sample using a forward-decaying --// priority reservoir. See Cormode et al's ""Forward Decay: A Practical Time --// Decay Model for Streaming Systems"". --// --// --type ExpDecaySample struct { -- alpha float64 -- count int64 -- mutex sync.Mutex -- reservoirSize int -- t0, t1 time.Time -- values *expDecaySampleHeap --} -- --// NewExpDecaySample constructs a new exponentially-decaying sample with the --// given reservoir size and alpha. 
--func NewExpDecaySample(reservoirSize int, alpha float64) Sample { -- if UseNilMetrics { -- return NilSample{} -- } -- s := &ExpDecaySample{ -- alpha: alpha, -- reservoirSize: reservoirSize, -- t0: time.Now(), -- values: newExpDecaySampleHeap(reservoirSize), -- } -- s.t1 = s.t0.Add(rescaleThreshold) -- return s --} -- --// Clear clears all samples. --func (s *ExpDecaySample) Clear() { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- s.count = 0 -- s.t0 = time.Now() -- s.t1 = s.t0.Add(rescaleThreshold) -- s.values.Clear() --} -- --// Count returns the number of samples recorded, which may exceed the --// reservoir size. --func (s *ExpDecaySample) Count() int64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return s.count --} -- --// Max returns the maximum value in the sample, which may not be the maximum --// value ever to be part of the sample. --func (s *ExpDecaySample) Max() int64 { -- return SampleMax(s.Values()) --} -- --// Mean returns the mean of the values in the sample. --func (s *ExpDecaySample) Mean() float64 { -- return SampleMean(s.Values()) --} -- --// Min returns the minimum value in the sample, which may not be the minimum --// value ever to be part of the sample. --func (s *ExpDecaySample) Min() int64 { -- return SampleMin(s.Values()) --} -- --// Percentile returns an arbitrary percentile of values in the sample. --func (s *ExpDecaySample) Percentile(p float64) float64 { -- return SamplePercentile(s.Values(), p) --} -- --// Percentiles returns a slice of arbitrary percentiles of values in the --// sample. --func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { -- return SamplePercentiles(s.Values(), ps) --} -- --// Size returns the size of the sample, which is at most the reservoir size. --func (s *ExpDecaySample) Size() int { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return s.values.Size() --} -- --// Snapshot returns a read-only copy of the sample. --func (s *ExpDecaySample) Snapshot() Sample { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- vals := s.values.Values() -- values := make([]int64, len(vals)) -- for i, v := range vals { -- values[i] = v.v -- } -- return &SampleSnapshot{ -- count: s.count, -- values: values, -- } --} -- --// StdDev returns the standard deviation of the values in the sample. --func (s *ExpDecaySample) StdDev() float64 { -- return SampleStdDev(s.Values()) --} -- --// Sum returns the sum of the values in the sample. --func (s *ExpDecaySample) Sum() int64 { -- return SampleSum(s.Values()) --} -- --// Update samples a new value. --func (s *ExpDecaySample) Update(v int64) { -- s.update(time.Now(), v) --} -- --// Values returns a copy of the values in the sample. --func (s *ExpDecaySample) Values() []int64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- vals := s.values.Values() -- values := make([]int64, len(vals)) -- for i, v := range vals { -- values[i] = v.v -- } -- return values --} -- --// Variance returns the variance of the values in the sample. --func (s *ExpDecaySample) Variance() float64 { -- return SampleVariance(s.Values()) --} -- --// update samples a new value at a particular timestamp. This is a method all --// its own to facilitate testing. 
--func (s *ExpDecaySample) update(t time.Time, v int64) { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- s.count++ -- if s.values.Size() == s.reservoirSize { -- s.values.Pop() -- } -- s.values.Push(expDecaySample{ -- k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), -- v: v, -- }) -- if t.After(s.t1) { -- values := s.values.Values() -- t0 := s.t0 -- s.values.Clear() -- s.t0 = t -- s.t1 = s.t0.Add(rescaleThreshold) -- for _, v := range values { -- v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) -- s.values.Push(v) -- } -- } --} -- --// NilSample is a no-op Sample. --type NilSample struct{} -- --// Clear is a no-op. --func (NilSample) Clear() {} -- --// Count is a no-op. --func (NilSample) Count() int64 { return 0 } -- --// Max is a no-op. --func (NilSample) Max() int64 { return 0 } -- --// Mean is a no-op. --func (NilSample) Mean() float64 { return 0.0 } -- --// Min is a no-op. --func (NilSample) Min() int64 { return 0 } -- --// Percentile is a no-op. --func (NilSample) Percentile(p float64) float64 { return 0.0 } -- --// Percentiles is a no-op. --func (NilSample) Percentiles(ps []float64) []float64 { -- return make([]float64, len(ps)) --} -- --// Size is a no-op. --func (NilSample) Size() int { return 0 } -- --// Sample is a no-op. --func (NilSample) Snapshot() Sample { return NilSample{} } -- --// StdDev is a no-op. --func (NilSample) StdDev() float64 { return 0.0 } -- --// Sum is a no-op. --func (NilSample) Sum() int64 { return 0 } -- --// Update is a no-op. --func (NilSample) Update(v int64) {} -- --// Values is a no-op. --func (NilSample) Values() []int64 { return []int64{} } -- --// Variance is a no-op. --func (NilSample) Variance() float64 { return 0.0 } -- --// SampleMax returns the maximum value of the slice of int64. --func SampleMax(values []int64) int64 { -- if 0 == len(values) { -- return 0 -- } -- var max int64 = math.MinInt64 -- for _, v := range values { -- if max < v { -- max = v -- } -- } -- return max --} -- --// SampleMean returns the mean value of the slice of int64. --func SampleMean(values []int64) float64 { -- if 0 == len(values) { -- return 0.0 -- } -- return float64(SampleSum(values)) / float64(len(values)) --} -- --// SampleMin returns the minimum value of the slice of int64. --func SampleMin(values []int64) int64 { -- if 0 == len(values) { -- return 0 -- } -- var min int64 = math.MaxInt64 -- for _, v := range values { -- if min > v { -- min = v -- } -- } -- return min --} -- --// SamplePercentiles returns an arbitrary percentile of the slice of int64. --func SamplePercentile(values int64Slice, p float64) float64 { -- return SamplePercentiles(values, []float64{p})[0] --} -- --// SamplePercentiles returns a slice of arbitrary percentiles of the slice of --// int64. --func SamplePercentiles(values int64Slice, ps []float64) []float64 { -- scores := make([]float64, len(ps)) -- size := len(values) -- if size > 0 { -- sort.Sort(values) -- for i, p := range ps { -- pos := p * float64(size+1) -- if pos < 1.0 { -- scores[i] = float64(values[0]) -- } else if pos >= float64(size) { -- scores[i] = float64(values[size-1]) -- } else { -- lower := float64(values[int(pos)-1]) -- upper := float64(values[int(pos)]) -- scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) -- } -- } -- } -- return scores --} -- --// SampleSnapshot is a read-only copy of another Sample. 
--type SampleSnapshot struct { -- count int64 -- values []int64 --} -- --func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot { -- return &SampleSnapshot{ -- count: count, -- values: values, -- } --} -- --// Clear panics. --func (*SampleSnapshot) Clear() { -- panic(""Clear called on a SampleSnapshot"") --} -- --// Count returns the count of inputs at the time the snapshot was taken. --func (s *SampleSnapshot) Count() int64 { return s.count } -- --// Max returns the maximal value at the time the snapshot was taken. --func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } -- --// Mean returns the mean value at the time the snapshot was taken. --func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } -- --// Min returns the minimal value at the time the snapshot was taken. --func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } -- --// Percentile returns an arbitrary percentile of values at the time the --// snapshot was taken. --func (s *SampleSnapshot) Percentile(p float64) float64 { -- return SamplePercentile(s.values, p) --} -- --// Percentiles returns a slice of arbitrary percentiles of values at the time --// the snapshot was taken. --func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { -- return SamplePercentiles(s.values, ps) --} -- --// Size returns the size of the sample at the time the snapshot was taken. --func (s *SampleSnapshot) Size() int { return len(s.values) } -- --// Snapshot returns the snapshot. --func (s *SampleSnapshot) Snapshot() Sample { return s } -- --// StdDev returns the standard deviation of values at the time the snapshot was --// taken. --func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } -- --// Sum returns the sum of values at the time the snapshot was taken. --func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } -- --// Update panics. --func (*SampleSnapshot) Update(int64) { -- panic(""Update called on a SampleSnapshot"") --} -- --// Values returns a copy of the values in the sample. --func (s *SampleSnapshot) Values() []int64 { -- values := make([]int64, len(s.values)) -- copy(values, s.values) -- return values --} -- --// Variance returns the variance of values at the time the snapshot was taken. --func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } -- --// SampleStdDev returns the standard deviation of the slice of int64. --func SampleStdDev(values []int64) float64 { -- return math.Sqrt(SampleVariance(values)) --} -- --// SampleSum returns the sum of the slice of int64. --func SampleSum(values []int64) int64 { -- var sum int64 -- for _, v := range values { -- sum += v -- } -- return sum --} -- --// SampleVariance returns the variance of the slice of int64. --func SampleVariance(values []int64) float64 { -- if 0 == len(values) { -- return 0.0 -- } -- m := SampleMean(values) -- var sum float64 -- for _, v := range values { -- d := float64(v) - m -- sum += d * d -- } -- return sum / float64(len(values)) --} -- --// A uniform sample using Vitter's Algorithm R. --// --// --type UniformSample struct { -- count int64 -- mutex sync.Mutex -- reservoirSize int -- values []int64 --} -- --// NewUniformSample constructs a new uniform sample with the given reservoir --// size. --func NewUniformSample(reservoirSize int) Sample { -- if UseNilMetrics { -- return NilSample{} -- } -- return &UniformSample{ -- reservoirSize: reservoirSize, -- values: make([]int64, 0, reservoirSize), -- } --} -- --// Clear clears all samples. 
--func (s *UniformSample) Clear() { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- s.count = 0 -- s.values = make([]int64, 0, s.reservoirSize) --} -- --// Count returns the number of samples recorded, which may exceed the --// reservoir size. --func (s *UniformSample) Count() int64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return s.count --} -- --// Max returns the maximum value in the sample, which may not be the maximum --// value ever to be part of the sample. --func (s *UniformSample) Max() int64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SampleMax(s.values) --} -- --// Mean returns the mean of the values in the sample. --func (s *UniformSample) Mean() float64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SampleMean(s.values) --} -- --// Min returns the minimum value in the sample, which may not be the minimum --// value ever to be part of the sample. --func (s *UniformSample) Min() int64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SampleMin(s.values) --} -- --// Percentile returns an arbitrary percentile of values in the sample. --func (s *UniformSample) Percentile(p float64) float64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SamplePercentile(s.values, p) --} -- --// Percentiles returns a slice of arbitrary percentiles of values in the --// sample. --func (s *UniformSample) Percentiles(ps []float64) []float64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SamplePercentiles(s.values, ps) --} -- --// Size returns the size of the sample, which is at most the reservoir size. --func (s *UniformSample) Size() int { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return len(s.values) --} -- --// Snapshot returns a read-only copy of the sample. --func (s *UniformSample) Snapshot() Sample { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- values := make([]int64, len(s.values)) -- copy(values, s.values) -- return &SampleSnapshot{ -- count: s.count, -- values: values, -- } --} -- --// StdDev returns the standard deviation of the values in the sample. --func (s *UniformSample) StdDev() float64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SampleStdDev(s.values) --} -- --// Sum returns the sum of the values in the sample. --func (s *UniformSample) Sum() int64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SampleSum(s.values) --} -- --// Update samples a new value. --func (s *UniformSample) Update(v int64) { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- s.count++ -- if len(s.values) < s.reservoirSize { -- s.values = append(s.values, v) -- } else { -- r := rand.Int63n(s.count) -- if r < int64(len(s.values)) { -- s.values[int(r)] = v -- } -- } --} -- --// Values returns a copy of the values in the sample. --func (s *UniformSample) Values() []int64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- values := make([]int64, len(s.values)) -- copy(values, s.values) -- return values --} -- --// Variance returns the variance of the values in the sample. --func (s *UniformSample) Variance() float64 { -- s.mutex.Lock() -- defer s.mutex.Unlock() -- return SampleVariance(s.values) --} -- --// expDecaySample represents an individual sample in a heap. --type expDecaySample struct { -- k float64 -- v int64 --} -- --func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { -- return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} --} -- --// expDecaySampleHeap is a min-heap of expDecaySamples. 
--// The internal implementation is copied from the standard library's container/heap --type expDecaySampleHeap struct { -- s []expDecaySample --} -- --func (h *expDecaySampleHeap) Clear() { -- h.s = h.s[:0] --} -- --func (h *expDecaySampleHeap) Push(s expDecaySample) { -- n := len(h.s) -- h.s = h.s[0 : n+1] -- h.s[n] = s -- h.up(n) --} -- --func (h *expDecaySampleHeap) Pop() expDecaySample { -- n := len(h.s) - 1 -- h.s[0], h.s[n] = h.s[n], h.s[0] -- h.down(0, n) -- -- n = len(h.s) -- s := h.s[n-1] -- h.s = h.s[0 : n-1] -- return s --} -- --func (h *expDecaySampleHeap) Size() int { -- return len(h.s) --} -- --func (h *expDecaySampleHeap) Values() []expDecaySample { -- return h.s --} -- --func (h *expDecaySampleHeap) up(j int) { -- for { -- i := (j - 1) / 2 // parent -- if i == j || !(h.s[j].k < h.s[i].k) { -- break -- } -- h.s[i], h.s[j] = h.s[j], h.s[i] -- j = i -- } --} -- --func (h *expDecaySampleHeap) down(i, n int) { -- for { -- j1 := 2*i + 1 -- if j1 >= n || j1 < 0 { // j1 < 0 after int overflow -- break -- } -- j := j1 // left child -- if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { -- j = j2 // = 2*i + 2 // right child -- } -- if !(h.s[j].k < h.s[i].k) { -- break -- } -- h.s[i], h.s[j] = h.s[j], h.s[i] -- i = j -- } --} -- --type int64Slice []int64 -- --func (p int64Slice) Len() int { return len(p) } --func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } --func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go -deleted file mode 100644 -index 693f190855c1e..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/syslog.go -+++ /dev/null -@@ -1,78 +0,0 @@ --// +build !windows -- --package metrics -- --import ( -- ""fmt"" -- ""log/syslog"" -- ""time"" --) -- --// Output each metric in the given registry to syslog periodically using --// the given syslogger. 
--func Syslog(r Registry, d time.Duration, w *syslog.Writer) { -- for _ = range time.Tick(d) { -- r.Each(func(name string, i interface{}) { -- switch metric := i.(type) { -- case Counter: -- w.Info(fmt.Sprintf(""counter %s: count: %d"", name, metric.Count())) -- case Gauge: -- w.Info(fmt.Sprintf(""gauge %s: value: %d"", name, metric.Value())) -- case GaugeFloat64: -- w.Info(fmt.Sprintf(""gauge %s: value: %f"", name, metric.Value())) -- case Healthcheck: -- metric.Check() -- w.Info(fmt.Sprintf(""healthcheck %s: error: %v"", name, metric.Error())) -- case Histogram: -- h := metric.Snapshot() -- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- w.Info(fmt.Sprintf( -- ""histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f"", -- name, -- h.Count(), -- h.Min(), -- h.Max(), -- h.Mean(), -- h.StdDev(), -- ps[0], -- ps[1], -- ps[2], -- ps[3], -- ps[4], -- )) -- case Meter: -- m := metric.Snapshot() -- w.Info(fmt.Sprintf( -- ""meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f"", -- name, -- m.Count(), -- m.Rate1(), -- m.Rate5(), -- m.Rate15(), -- m.RateMean(), -- )) -- case Timer: -- t := metric.Snapshot() -- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- w.Info(fmt.Sprintf( -- ""timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f"", -- name, -- t.Count(), -- t.Min(), -- t.Max(), -- t.Mean(), -- t.StdDev(), -- ps[0], -- ps[1], -- ps[2], -- ps[3], -- ps[4], -- t.Rate1(), -- t.Rate5(), -- t.Rate15(), -- t.RateMean(), -- )) -- } -- }) -- } --} -diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go -deleted file mode 100644 -index d6ec4c6260fcb..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/timer.go -+++ /dev/null -@@ -1,329 +0,0 @@ --package metrics -- --import ( -- ""sync"" -- ""time"" --) -- --// Timers capture the duration and rate of events. --type Timer interface { -- Count() int64 -- Max() int64 -- Mean() float64 -- Min() int64 -- Percentile(float64) float64 -- Percentiles([]float64) []float64 -- Rate1() float64 -- Rate5() float64 -- Rate15() float64 -- RateMean() float64 -- Snapshot() Timer -- StdDev() float64 -- Stop() -- Sum() int64 -- Time(func()) -- Update(time.Duration) -- UpdateSince(time.Time) -- Variance() float64 --} -- --// GetOrRegisterTimer returns an existing Timer or constructs and registers a --// new StandardTimer. --// Be sure to unregister the meter from the registry once it is of no use to --// allow for garbage collection. --func GetOrRegisterTimer(name string, r Registry) Timer { -- if nil == r { -- r = DefaultRegistry -- } -- return r.GetOrRegister(name, NewTimer).(Timer) --} -- --// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. --// Be sure to call Stop() once the timer is of no use to allow for garbage collection. --func NewCustomTimer(h Histogram, m Meter) Timer { -- if UseNilMetrics { -- return NilTimer{} -- } -- return &StandardTimer{ -- histogram: h, -- meter: m, -- } --} -- --// NewRegisteredTimer constructs and registers a new StandardTimer. --// Be sure to unregister the meter from the registry once it is of no use to --// allow for garbage collection. 
--func NewRegisteredTimer(name string, r Registry) Timer { -- c := NewTimer() -- if nil == r { -- r = DefaultRegistry -- } -- r.Register(name, c) -- return c --} -- --// NewTimer constructs a new StandardTimer using an exponentially-decaying --// sample with the same reservoir size and alpha as UNIX load averages. --// Be sure to call Stop() once the timer is of no use to allow for garbage collection. --func NewTimer() Timer { -- if UseNilMetrics { -- return NilTimer{} -- } -- return &StandardTimer{ -- histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), -- meter: NewMeter(), -- } --} -- --// NilTimer is a no-op Timer. --type NilTimer struct { -- h Histogram -- m Meter --} -- --// Count is a no-op. --func (NilTimer) Count() int64 { return 0 } -- --// Max is a no-op. --func (NilTimer) Max() int64 { return 0 } -- --// Mean is a no-op. --func (NilTimer) Mean() float64 { return 0.0 } -- --// Min is a no-op. --func (NilTimer) Min() int64 { return 0 } -- --// Percentile is a no-op. --func (NilTimer) Percentile(p float64) float64 { return 0.0 } -- --// Percentiles is a no-op. --func (NilTimer) Percentiles(ps []float64) []float64 { -- return make([]float64, len(ps)) --} -- --// Rate1 is a no-op. --func (NilTimer) Rate1() float64 { return 0.0 } -- --// Rate5 is a no-op. --func (NilTimer) Rate5() float64 { return 0.0 } -- --// Rate15 is a no-op. --func (NilTimer) Rate15() float64 { return 0.0 } -- --// RateMean is a no-op. --func (NilTimer) RateMean() float64 { return 0.0 } -- --// Snapshot is a no-op. --func (NilTimer) Snapshot() Timer { return NilTimer{} } -- --// StdDev is a no-op. --func (NilTimer) StdDev() float64 { return 0.0 } -- --// Stop is a no-op. --func (NilTimer) Stop() {} -- --// Sum is a no-op. --func (NilTimer) Sum() int64 { return 0 } -- --// Time is a no-op. --func (NilTimer) Time(func()) {} -- --// Update is a no-op. --func (NilTimer) Update(time.Duration) {} -- --// UpdateSince is a no-op. --func (NilTimer) UpdateSince(time.Time) {} -- --// Variance is a no-op. --func (NilTimer) Variance() float64 { return 0.0 } -- --// StandardTimer is the standard implementation of a Timer and uses a Histogram --// and Meter. --type StandardTimer struct { -- histogram Histogram -- meter Meter -- mutex sync.Mutex --} -- --// Count returns the number of events recorded. --func (t *StandardTimer) Count() int64 { -- return t.histogram.Count() --} -- --// Max returns the maximum value in the sample. --func (t *StandardTimer) Max() int64 { -- return t.histogram.Max() --} -- --// Mean returns the mean of the values in the sample. --func (t *StandardTimer) Mean() float64 { -- return t.histogram.Mean() --} -- --// Min returns the minimum value in the sample. --func (t *StandardTimer) Min() int64 { -- return t.histogram.Min() --} -- --// Percentile returns an arbitrary percentile of the values in the sample. --func (t *StandardTimer) Percentile(p float64) float64 { -- return t.histogram.Percentile(p) --} -- --// Percentiles returns a slice of arbitrary percentiles of the values in the --// sample. --func (t *StandardTimer) Percentiles(ps []float64) []float64 { -- return t.histogram.Percentiles(ps) --} -- --// Rate1 returns the one-minute moving average rate of events per second. --func (t *StandardTimer) Rate1() float64 { -- return t.meter.Rate1() --} -- --// Rate5 returns the five-minute moving average rate of events per second. --func (t *StandardTimer) Rate5() float64 { -- return t.meter.Rate5() --} -- --// Rate15 returns the fifteen-minute moving average rate of events per second. 
--func (t *StandardTimer) Rate15() float64 { -- return t.meter.Rate15() --} -- --// RateMean returns the meter's mean rate of events per second. --func (t *StandardTimer) RateMean() float64 { -- return t.meter.RateMean() --} -- --// Snapshot returns a read-only copy of the timer. --func (t *StandardTimer) Snapshot() Timer { -- t.mutex.Lock() -- defer t.mutex.Unlock() -- return &TimerSnapshot{ -- histogram: t.histogram.Snapshot().(*HistogramSnapshot), -- meter: t.meter.Snapshot().(*MeterSnapshot), -- } --} -- --// StdDev returns the standard deviation of the values in the sample. --func (t *StandardTimer) StdDev() float64 { -- return t.histogram.StdDev() --} -- --// Stop stops the meter. --func (t *StandardTimer) Stop() { -- t.meter.Stop() --} -- --// Sum returns the sum in the sample. --func (t *StandardTimer) Sum() int64 { -- return t.histogram.Sum() --} -- --// Record the duration of the execution of the given function. --func (t *StandardTimer) Time(f func()) { -- ts := time.Now() -- f() -- t.Update(time.Since(ts)) --} -- --// Record the duration of an event. --func (t *StandardTimer) Update(d time.Duration) { -- t.mutex.Lock() -- defer t.mutex.Unlock() -- t.histogram.Update(int64(d)) -- t.meter.Mark(1) --} -- --// Record the duration of an event that started at a time and ends now. --func (t *StandardTimer) UpdateSince(ts time.Time) { -- t.mutex.Lock() -- defer t.mutex.Unlock() -- t.histogram.Update(int64(time.Since(ts))) -- t.meter.Mark(1) --} -- --// Variance returns the variance of the values in the sample. --func (t *StandardTimer) Variance() float64 { -- return t.histogram.Variance() --} -- --// TimerSnapshot is a read-only copy of another Timer. --type TimerSnapshot struct { -- histogram *HistogramSnapshot -- meter *MeterSnapshot --} -- --// Count returns the number of events recorded at the time the snapshot was --// taken. --func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } -- --// Max returns the maximum value at the time the snapshot was taken. --func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } -- --// Mean returns the mean value at the time the snapshot was taken. --func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } -- --// Min returns the minimum value at the time the snapshot was taken. --func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } -- --// Percentile returns an arbitrary percentile of sampled values at the time the --// snapshot was taken. --func (t *TimerSnapshot) Percentile(p float64) float64 { -- return t.histogram.Percentile(p) --} -- --// Percentiles returns a slice of arbitrary percentiles of sampled values at --// the time the snapshot was taken. --func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { -- return t.histogram.Percentiles(ps) --} -- --// Rate1 returns the one-minute moving average rate of events per second at the --// time the snapshot was taken. --func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } -- --// Rate5 returns the five-minute moving average rate of events per second at --// the time the snapshot was taken. --func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } -- --// Rate15 returns the fifteen-minute moving average rate of events per second --// at the time the snapshot was taken. --func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } -- --// RateMean returns the meter's mean rate of events per second at the time the --// snapshot was taken. 
--func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } -- --// Snapshot returns the snapshot. --func (t *TimerSnapshot) Snapshot() Timer { return t } -- --// StdDev returns the standard deviation of the values at the time the snapshot --// was taken. --func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } -- --// Stop is a no-op. --func (t *TimerSnapshot) Stop() {} -- --// Sum returns the sum at the time the snapshot was taken. --func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } -- --// Time panics. --func (*TimerSnapshot) Time(func()) { -- panic(""Time called on a TimerSnapshot"") --} -- --// Update panics. --func (*TimerSnapshot) Update(time.Duration) { -- panic(""Update called on a TimerSnapshot"") --} -- --// UpdateSince panics. --func (*TimerSnapshot) UpdateSince(time.Time) { -- panic(""UpdateSince called on a TimerSnapshot"") --} -- --// Variance returns the variance of the values at the time the snapshot was --// taken. --func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() } -diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh -deleted file mode 100755 -index c4ae91e642d67..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/validate.sh -+++ /dev/null -@@ -1,10 +0,0 @@ --#!/bin/bash -- --set -e -- --# check there are no formatting issues --GOFMT_LINES=`gofmt -l . | wc -l | xargs` --test $GOFMT_LINES -eq 0 || echo ""gofmt needs to be run, ${GOFMT_LINES} files have issues"" -- --# run the tests for the root package --go test -race . -diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go -deleted file mode 100644 -index 091e971d2e6fa..0000000000000 ---- a/vendor/github.com/rcrowley/go-metrics/writer.go -+++ /dev/null -@@ -1,100 +0,0 @@ --package metrics -- --import ( -- ""fmt"" -- ""io"" -- ""sort"" -- ""time"" --) -- --// Write sorts writes each metric in the given registry periodically to the --// given io.Writer. --func Write(r Registry, d time.Duration, w io.Writer) { -- for _ = range time.Tick(d) { -- WriteOnce(r, w) -- } --} -- --// WriteOnce sorts and writes metrics in the given registry to the given --// io.Writer. 
--func WriteOnce(r Registry, w io.Writer) { -- var namedMetrics namedMetricSlice -- r.Each(func(name string, i interface{}) { -- namedMetrics = append(namedMetrics, namedMetric{name, i}) -- }) -- -- sort.Sort(namedMetrics) -- for _, namedMetric := range namedMetrics { -- switch metric := namedMetric.m.(type) { -- case Counter: -- fmt.Fprintf(w, ""counter %s\n"", namedMetric.name) -- fmt.Fprintf(w, "" count: %9d\n"", metric.Count()) -- case Gauge: -- fmt.Fprintf(w, ""gauge %s\n"", namedMetric.name) -- fmt.Fprintf(w, "" value: %9d\n"", metric.Value()) -- case GaugeFloat64: -- fmt.Fprintf(w, ""gauge %s\n"", namedMetric.name) -- fmt.Fprintf(w, "" value: %f\n"", metric.Value()) -- case Healthcheck: -- metric.Check() -- fmt.Fprintf(w, ""healthcheck %s\n"", namedMetric.name) -- fmt.Fprintf(w, "" error: %v\n"", metric.Error()) -- case Histogram: -- h := metric.Snapshot() -- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- fmt.Fprintf(w, ""histogram %s\n"", namedMetric.name) -- fmt.Fprintf(w, "" count: %9d\n"", h.Count()) -- fmt.Fprintf(w, "" min: %9d\n"", h.Min()) -- fmt.Fprintf(w, "" max: %9d\n"", h.Max()) -- fmt.Fprintf(w, "" mean: %12.2f\n"", h.Mean()) -- fmt.Fprintf(w, "" stddev: %12.2f\n"", h.StdDev()) -- fmt.Fprintf(w, "" median: %12.2f\n"", ps[0]) -- fmt.Fprintf(w, "" 75%%: %12.2f\n"", ps[1]) -- fmt.Fprintf(w, "" 95%%: %12.2f\n"", ps[2]) -- fmt.Fprintf(w, "" 99%%: %12.2f\n"", ps[3]) -- fmt.Fprintf(w, "" 99.9%%: %12.2f\n"", ps[4]) -- case Meter: -- m := metric.Snapshot() -- fmt.Fprintf(w, ""meter %s\n"", namedMetric.name) -- fmt.Fprintf(w, "" count: %9d\n"", m.Count()) -- fmt.Fprintf(w, "" 1-min rate: %12.2f\n"", m.Rate1()) -- fmt.Fprintf(w, "" 5-min rate: %12.2f\n"", m.Rate5()) -- fmt.Fprintf(w, "" 15-min rate: %12.2f\n"", m.Rate15()) -- fmt.Fprintf(w, "" mean rate: %12.2f\n"", m.RateMean()) -- case Timer: -- t := metric.Snapshot() -- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) -- fmt.Fprintf(w, ""timer %s\n"", namedMetric.name) -- fmt.Fprintf(w, "" count: %9d\n"", t.Count()) -- fmt.Fprintf(w, "" min: %9d\n"", t.Min()) -- fmt.Fprintf(w, "" max: %9d\n"", t.Max()) -- fmt.Fprintf(w, "" mean: %12.2f\n"", t.Mean()) -- fmt.Fprintf(w, "" stddev: %12.2f\n"", t.StdDev()) -- fmt.Fprintf(w, "" median: %12.2f\n"", ps[0]) -- fmt.Fprintf(w, "" 75%%: %12.2f\n"", ps[1]) -- fmt.Fprintf(w, "" 95%%: %12.2f\n"", ps[2]) -- fmt.Fprintf(w, "" 99%%: %12.2f\n"", ps[3]) -- fmt.Fprintf(w, "" 99.9%%: %12.2f\n"", ps[4]) -- fmt.Fprintf(w, "" 1-min rate: %12.2f\n"", t.Rate1()) -- fmt.Fprintf(w, "" 5-min rate: %12.2f\n"", t.Rate5()) -- fmt.Fprintf(w, "" 15-min rate: %12.2f\n"", t.Rate15()) -- fmt.Fprintf(w, "" mean rate: %12.2f\n"", t.RateMean()) -- } -- } --} -- --type namedMetric struct { -- name string -- m interface{} --} -- --// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. 
--type namedMetricSlice []namedMetric -- --func (nms namedMetricSlice) Len() int { return len(nms) } -- --func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } -- --func (nms namedMetricSlice) Less(i, j int) bool { -- return nms[i].name < nms[j].name --} -diff --git a/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go b/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go -deleted file mode 100644 -index 78e4a771c30cf..0000000000000 ---- a/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go -+++ /dev/null -@@ -1,150 +0,0 @@ --package loki -- --import ( -- ""fmt"" -- ""io"" -- ""log"" -- ""net/http"" -- ""sync"" -- -- ""github.com/apache/thrift/lib/go/thrift"" -- ""github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"" --) -- --// Want to be able to support a service doing 100 QPS with a 15s scrape interval --var globalCollector = NewCollector(15 * 100) -- --type Collector struct { -- mtx sync.Mutex -- traceIDs map[int64]int // map from trace ID to index in traces -- traces []trace -- next int -- length int --} -- --type trace struct { -- traceID int64 -- spans []*zipkincore.Span --} -- --func NewCollector(capacity int) *Collector { -- return &Collector{ -- traceIDs: make(map[int64]int, capacity), -- traces: make([]trace, capacity, capacity), -- next: 0, -- length: 0, -- } --} -- --func (c *Collector) Collect(span *zipkincore.Span) error { -- if span == nil { -- return fmt.Errorf(""cannot collect nil span"") -- } -- -- c.mtx.Lock() -- defer c.mtx.Unlock() -- -- traceID := span.GetTraceID() -- idx, ok := c.traceIDs[traceID] -- if !ok { -- // Pick a slot in c.spans for this trace -- idx = c.next -- c.next++ -- c.next %= cap(c.traces) // wrap -- -- // If the slot it occupied, we'll need to clear the trace ID index, -- // otherwise we'll need to number of traces. -- if c.length == cap(c.traces) { -- delete(c.traceIDs, c.traces[idx].traceID) -- } else { -- c.length++ -- } -- -- // Initialise said slot. -- c.traceIDs[traceID] = idx -- c.traces[idx].traceID = traceID -- c.traces[idx].spans = c.traces[idx].spans[:0] -- } -- -- c.traces[idx].spans = append(c.traces[idx].spans, span) -- return nil --} -- --func (*Collector) Close() error { -- return nil --} -- --func (c *Collector) gather() []*zipkincore.Span { -- c.mtx.Lock() -- defer c.mtx.Unlock() -- -- spans := make([]*zipkincore.Span, 0, c.length) -- i, count := c.next-c.length, 0 -- if i < 0 { -- i = cap(c.traces) + i -- } -- for count < c.length { -- i %= cap(c.traces) -- spans = append(spans, c.traces[i].spans...) 
-- delete(c.traceIDs, c.traces[i].traceID) -- i++ -- count++ -- } -- c.length = 0 -- if len(c.traceIDs) != 0 { -- panic(""didn't clear all trace ids"") -- } -- return spans --} -- --func (c *Collector) ServeHTTP(w http.ResponseWriter, r *http.Request) { -- spans := c.gather() -- if err := WriteSpans(spans, w); err != nil { -- log.Printf(""error writing spans: %v"", err) -- http.Error(w, err.Error(), http.StatusInternalServerError) -- return -- } --} -- --func WriteSpans(spans []*zipkincore.Span, w io.Writer) error { -- transport := thrift.NewStreamTransportW(w) -- protocol := thrift.NewTCompactProtocol(transport) -- -- if err := protocol.WriteListBegin(thrift.STRUCT, len(spans)); err != nil { -- return err -- } -- for _, span := range spans { -- if err := span.Write(protocol); err != nil { -- return err -- } -- } -- if err := protocol.WriteListEnd(); err != nil { -- return err -- } -- return protocol.Flush() --} -- --func ReadSpans(r io.Reader) ([]*zipkincore.Span, error) { -- transport := thrift.NewStreamTransportR(r) -- protocol := thrift.NewTCompactProtocol(transport) -- ttype, size, err := protocol.ReadListBegin() -- if err != nil { -- return nil, err -- } -- spans := make([]*zipkincore.Span, 0, size) -- if ttype != thrift.STRUCT { -- return nil, fmt.Errorf(""unexpected type: %v"", ttype) -- } -- for i := 0; i < size; i++ { -- span := zipkincore.NewSpan() -- if err := span.Read(protocol); err != nil { -- return nil, err -- } -- spans = append(spans, span) -- } -- return spans, protocol.ReadListEnd() --} -- --func Handler() http.Handler { -- return globalCollector --} -diff --git a/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go b/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go -deleted file mode 100644 -index b3efe9e97fd19..0000000000000 ---- a/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go -+++ /dev/null -@@ -1,27 +0,0 @@ --package loki -- --import ( -- ""fmt"" -- ""os"" -- -- ""github.com/opentracing/opentracing-go"" -- ""github.com/openzipkin/zipkin-go-opentracing"" --) -- --func NewTracer() (opentracing.Tracer, error) { -- // create recorder. -- hostname, err := os.Hostname() -- if err != nil { -- return nil, err -- } -- recorder := zipkintracer.NewRecorder(globalCollector, false, hostname, """") -- -- // create tracer. -- tracer, err := zipkintracer.NewTracer(recorder) -- if err != nil { -- fmt.Printf(""unable to create Zipkin tracer: %+v"", err) -- os.Exit(-1) -- } -- -- return tracer, nil --} -diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_auth.go b/vendor/github.com/weaveworks/common/middleware/grpc_auth.go -index 35f548792cc5b..10be1f8d706a1 100644 ---- a/vendor/github.com/weaveworks/common/middleware/grpc_auth.go -+++ b/vendor/github.com/weaveworks/common/middleware/grpc_auth.go -@@ -17,6 +17,17 @@ func ClientUserHeaderInterceptor(ctx context.Context, method string, req, reply - return invoker(ctx, method, req, reply, cc, opts...) - } - -+// StreamClientUserHeaderInterceptor propagates the user ID from the context to gRPC metadata, which eventually ends up as a HTTP2 header. -+// For streaming gRPC requests. -+func StreamClientUserHeaderInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { -+ ctx, err := user.InjectIntoGRPCRequest(ctx) -+ if err != nil { -+ return nil, err -+ } -+ -+ return streamer(ctx, desc, cc, method, opts...) 
-+} -+ - // ServerUserHeaderInterceptor propagates the user ID from the gRPC metadata back to our context. - func ServerUserHeaderInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - _, ctx, err := user.ExtractFromGRPCRequest(ctx) -@@ -26,3 +37,25 @@ func ServerUserHeaderInterceptor(ctx context.Context, req interface{}, info *grp - - return handler(ctx, req) - } -+ -+// StreamServerUserHeaderInterceptor propagates the user ID from the gRPC metadata back to our context. -+func StreamServerUserHeaderInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { -+ _, ctx, err := user.ExtractFromGRPCRequest(ss.Context()) -+ if err != nil { -+ return err -+ } -+ -+ return handler(srv, serverStream{ -+ ctx: ctx, -+ ServerStream: ss, -+ }) -+} -+ -+type serverStream struct { -+ ctx context.Context -+ grpc.ServerStream -+} -+ -+func (ss serverStream) Context() context.Context { -+ return ss.ctx -+} -diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go b/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go -index 0614433448aab..c5c7d8468aa76 100644 ---- a/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go -+++ b/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go -@@ -10,8 +10,8 @@ import ( - ""google.golang.org/grpc"" - ) - --// ServerInstrumentInterceptor instruments gRPC requests for errors and latency. --func ServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { -+// UnaryServerInstrumentInterceptor instruments gRPC requests for errors and latency. -+func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - begin := time.Now() - resp, err := handler(ctx, req) -@@ -28,3 +28,22 @@ func ServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServer - return resp, err - } - } -+ -+// StreamServerInstrumentInterceptor instruments gRPC requests for errors and latency. 
-+func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.StreamServerInterceptor { -+ return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { -+ begin := time.Now() -+ err := handler(srv, ss) -+ duration := time.Since(begin).Seconds() -+ respStatus := ""success"" -+ if err != nil { -+ if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { -+ respStatus = strconv.Itoa(int(errResp.Code)) -+ } else { -+ respStatus = ""error"" -+ } -+ } -+ hist.WithLabelValues(gRPC, info.FullMethod, respStatus, ""false"").Observe(duration) -+ return err -+ } -+} -diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_logging.go b/vendor/github.com/weaveworks/common/middleware/grpc_logging.go -index a95189e045eab..f55d50cb9f11e 100644 ---- a/vendor/github.com/weaveworks/common/middleware/grpc_logging.go -+++ b/vendor/github.com/weaveworks/common/middleware/grpc_logging.go -@@ -33,3 +33,16 @@ func (s GRPCServerLog) UnaryServerInterceptor(ctx context.Context, req interface - } - return resp, err - } -+ -+// StreamServerInterceptor returns an interceptor that logs gRPC requests -+func (s GRPCServerLog) StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { -+ begin := time.Now() -+ err := handler(srv, ss) -+ entry := logging.With(ss.Context()).WithFields(log.Fields{""method"": info.FullMethod, ""duration"": time.Since(begin)}) -+ if err != nil { -+ entry.WithError(err).Warn(gRPC) -+ } else { -+ entry.Debugf(""%s (success)"", gRPC) -+ } -+ return err -+} -diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go -index 8cf02ee4a6dab..51269f790d411 100644 ---- a/vendor/github.com/weaveworks/common/server/server.go -+++ b/vendor/github.com/weaveworks/common/server/server.go -@@ -18,7 +18,6 @@ import ( - ""golang.org/x/net/context"" - ""google.golang.org/grpc"" - -- ""github.com/weaveworks-experiments/loki/pkg/client"" - ""github.com/weaveworks/common/httpgrpc"" - httpgrpc_server ""github.com/weaveworks/common/httpgrpc/server"" - ""github.com/weaveworks/common/instrument"" -@@ -26,15 +25,6 @@ import ( - ""github.com/weaveworks/common/signals"" - ) - --func init() { -- tracer, err := loki.NewTracer() -- if err != nil { -- panic(fmt.Sprintf(""Failed to create tracer: %v"", err)) -- } else { -- opentracing.InitGlobalTracer(tracer) -- } --} -- - // Config for a Server - type Config struct { - MetricsNamespace string -@@ -49,9 +39,10 @@ type Config struct { - HTTPServerWriteTimeout time.Duration - HTTPServerIdleTimeout time.Duration - -- GRPCOptions []grpc.ServerOption -- GRPCMiddleware []grpc.UnaryServerInterceptor -- HTTPMiddleware []middleware.Interface -+ GRPCOptions []grpc.ServerOption -+ GRPCMiddleware []grpc.UnaryServerInterceptor -+ GRPCStreamMiddleware []grpc.StreamServerInterceptor -+ HTTPMiddleware []middleware.Interface - } - - // RegisterFlags adds the flags required to config this to the given FlagSet -@@ -106,14 +97,25 @@ func New(cfg Config) (*Server, error) { - serverLog := middleware.GRPCServerLog{WithRequest: !cfg.ExcludeRequestInLog} - grpcMiddleware := []grpc.UnaryServerInterceptor{ - serverLog.UnaryServerInterceptor, -- middleware.ServerInstrumentInterceptor(requestDuration), -+ middleware.UnaryServerInstrumentInterceptor(requestDuration), - otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()), - } - grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...) 
-+ -+ grpcStreamMiddleware := []grpc.StreamServerInterceptor{ -+ serverLog.StreamServerInterceptor, -+ middleware.StreamServerInstrumentInterceptor(requestDuration), -+ otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()), -+ } -+ grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...) -+ - grpcOptions := []grpc.ServerOption{ - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - grpcMiddleware..., - )), -+ grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( -+ grpcStreamMiddleware..., -+ )), - } - grpcOptions = append(grpcOptions, cfg.GRPCOptions...) - grpcServer := grpc.NewServer(grpcOptions...) -@@ -156,7 +158,6 @@ func New(cfg Config) (*Server, error) { - // RegisterInstrumentation on the given router. - func RegisterInstrumentation(router *mux.Router) { - router.Handle(""/metrics"", prometheus.Handler()) -- router.Handle(""/traces"", loki.Handler()) - router.PathPrefix(""/debug/pprof"").Handler(http.DefaultServeMux) - }",unknown,"Update vendor - -Signed-off-by: Tom Wilkie " -282e38548ceb96b1c518010c47b8eabf4317e8fd,2024-04-24 02:10:33,Jay Clifford,"feat: Update getting started demo to Loki 3.0 (#12723) - -Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md -index 16e14be923acc..b08f07a8e7973 100644 ---- a/docs/sources/get-started/quick-start.md -+++ b/docs/sources/get-started/quick-start.md -@@ -12,14 +12,15 @@ If you want to experiment with Loki, you can run Loki locally using the Docker C - The Docker Compose configuration instantiates the following components, each in its own container: - - - **flog** a sample application which generates log lines. [flog](https://github.com/mingrammer/flog) is a log generator for common log formats. --- **Promtail** which scrapes the log lines from flog, and pushes them to Loki through the gateway. -+- **Grafana Alloy** which scrapes the log lines from flog, and pushes them to Loki through the gateway. - - **Gateway** (NGINX) which receives requests and redirects them to the appropriate container based on the request's URL. --- One Loki **read** component. --- One Loki **write** component. -+- One Loki **read** component (Query Frontend, Querier). -+- One Loki **write** component (Distributor, Ingester). -+- One Loki **backend** component (Index Gateway, Compactor, Ruler, Bloom Compactor (Experimental), Bloom Gateway (Experimental)). - - **Minio** an S3-compatible object store which Loki uses to store its index and chunks. - - **Grafana** which provides visualization of the log lines captured within Loki. - --{{< figure max-width=""75%"" src=""/media/docs/loki/get-started-flog-v2.png"" caption=""Getting started sample application"" alt=""Getting started sample application"">}} -+{{< figure max-width=""75%"" src=""/media/docs/loki/get-started-flog-v3.png"" caption=""Getting started sample application"" alt=""Getting started sample application"">}} - - ## Installing Loki and collecting sample logs - -@@ -41,11 +42,11 @@ This quickstart assumes you are running Linux. - cd evaluate-loki - ``` - --1. Download `loki-config.yaml`, `promtail-local-config.yaml`, and `docker-compose.yaml`: -+1. 
Download `loki-config.yaml`, `alloy-local-config.yaml`, and `docker-compose.yaml`: - - ```bash - wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/loki-config.yaml -O loki-config.yaml -- wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/promtail-local-config.yaml -O promtail-local-config.yaml -+ wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/alloy-local-config.yaml -O alloy-local-config.yaml - wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/docker-compose.yaml -O docker-compose.yaml - ``` - -@@ -63,16 +64,20 @@ This quickstart assumes you are running Linux. - ✔ Network evaluate-loki_loki Created 0.1s - ✔ Container evaluate-loki-minio-1 Started 0.6s - ✔ Container evaluate-loki-flog-1 Started 0.6s -+ ✔ Container evaluate-loki-backend-1 Started 0.8s - ✔ Container evaluate-loki-write-1 Started 0.8s - ✔ Container evaluate-loki-read-1 Started 0.8s - ✔ Container evaluate-loki-gateway-1 Started 1.1s - ✔ Container evaluate-loki-grafana-1 Started 1.4s -- ✔ Container evaluate-loki-promtail-1 Started 1.4s -+ ✔ Container evaluate-loki-alloy-1 Started 1.4s - ``` - - 1. (Optional) Verify that the Loki cluster is up and running. - - The read component returns `ready` when you point a web browser at [http://localhost:3101/ready](http://localhost:3101/ready). The message `Query Frontend not ready: not ready: number of schedulers this worker is connected to is 0` will show prior to the read component being ready. - - The write component returns `ready` when you point a web browser at [http://localhost:3102/ready](http://localhost:3102/ready). The message `Ingester not ready: waiting for 15s after being ready` will show prior to the write component being ready. -+ -+1. (Optional) Verify that Grafana Alloy is running. -+ - Grafana Alloy's UI can be accessed at [http://localhost:12345](http://localhost:12345). 
- - ## Viewing your logs in Grafana - -diff --git a/examples/getting-started/alloy-local-config.yaml b/examples/getting-started/alloy-local-config.yaml -new file mode 100644 -index 0000000000000..ff0448ac54353 ---- /dev/null -+++ b/examples/getting-started/alloy-local-config.yaml -@@ -0,0 +1,30 @@ -+discovery.docker ""flog_scrape"" { -+ host = ""unix:///var/run/docker.sock"" -+ refresh_interval = ""5s"" -+} -+ -+discovery.relabel ""flog_scrape"" { -+ targets = [] -+ -+ rule { -+ source_labels = [""__meta_docker_container_name""] -+ regex = ""/(.*)"" -+ target_label = ""container"" -+ } -+} -+ -+loki.source.docker ""flog_scrape"" { -+ host = ""unix:///var/run/docker.sock"" -+ targets = discovery.docker.flog_scrape.targets -+ forward_to = [loki.write.default.receiver] -+ relabel_rules = discovery.relabel.flog_scrape.rules -+ refresh_interval = ""5s"" -+} -+ -+loki.write ""default"" { -+ endpoint { -+ url = ""http://gateway:3100/loki/api/v1/push"" -+ tenant_id = ""tenant1"" -+ } -+ external_labels = {} -+} -diff --git a/examples/getting-started/docker-compose.yaml b/examples/getting-started/docker-compose.yaml -index 83dcde94d273e..449fe55f2b6e2 100644 ---- a/examples/getting-started/docker-compose.yaml -+++ b/examples/getting-started/docker-compose.yaml -@@ -6,7 +6,7 @@ networks: - - services: - read: -- image: grafana/loki:2.9.2 -+ image: grafana/loki:3.0.0 - command: ""-config.file=/etc/loki/config.yaml -target=read"" - ports: - - 3101:3100 -@@ -27,7 +27,7 @@ services: - - loki - - write: -- image: grafana/loki:2.9.2 -+ image: grafana/loki:3.0.0 - command: ""-config.file=/etc/loki/config.yaml -target=write"" - ports: - - 3102:3100 -@@ -45,12 +45,14 @@ services: - networks: - <<: *loki-dns - -- promtail: -- image: grafana/promtail:2.9.2 -+ alloy: -+ image: grafana/alloy:latest - volumes: -- - ./promtail-local-config.yaml:/etc/promtail/config.yaml:ro -+ - ./alloy-local-config.yaml:/etc/alloy/config.alloy:ro - - /var/run/docker.sock:/var/run/docker.sock -- command: -config.file=/etc/promtail/config.yaml -+ command: run --server.http.listen-addr=0.0.0.0:12345 --storage.path=/var/lib/alloy/data /etc/alloy/config.alloy -+ ports: -+ - 12345:12345 - depends_on: - - gateway - networks: -@@ -118,6 +120,20 @@ services: - networks: - - loki - -+ backend: -+ image: grafana/loki:3.0.0 -+ volumes: -+ - ./loki-config.yaml:/etc/loki/config.yaml -+ ports: -+ - ""3100"" -+ - ""7946"" -+ command: ""-config.file=/etc/loki/config.yaml -target=backend -legacy-read-mode=false"" -+ depends_on: -+ - gateway -+ networks: -+ - loki -+ -+ - gateway: - image: nginx:latest - depends_on: -@@ -186,6 +202,7 @@ services: - retries: 5 - networks: - - loki -+ - - flog: - image: mingrammer/flog -diff --git a/examples/getting-started/loki-config.yaml b/examples/getting-started/loki-config.yaml -index 73ca66f78796a..3228092e4e8f4 100644 ---- a/examples/getting-started/loki-config.yaml -+++ b/examples/getting-started/loki-config.yaml -@@ -1,9 +1,17 @@ - --- - server: -+ http_listen_address: 0.0.0.0 - http_listen_port: 3100 -+ - memberlist: -- join_members: -- - loki:7946 -+ join_members: [""read"", ""write"", ""backend""] -+ dead_node_reclaim_time: 30s -+ gossip_to_dead_nodes_time: 15s -+ left_ingesters_timeout: 30s -+ bind_addr: ['0.0.0.0'] -+ bind_port: 7946 -+ gossip_interval: 2s -+ - schema_config: - configs: - - from: 2021-08-01 -@@ -16,6 +24,7 @@ schema_config: - common: - path_prefix: /loki - replication_factor: 1 -+ compactor_address: http://backend:3100 - storage: - s3: - endpoint: minio:9000 -@@ -31,3 +40,6 @@ ruler: - 
storage: - s3: - bucketnames: loki-ruler -+ -+compactor: -+ working_directory: /tmp/compactor -\ No newline at end of file -diff --git a/examples/getting-started/promtail-local-config.yaml b/examples/getting-started/promtail-local-config.yaml -deleted file mode 100644 -index dcb2d3eed81a2..0000000000000 ---- a/examples/getting-started/promtail-local-config.yaml -+++ /dev/null -@@ -1,22 +0,0 @@ ----- --server: -- http_listen_port: 9080 -- grpc_listen_port: 0 -- --positions: -- filename: /tmp/positions.yaml -- --clients: -- - url: http://gateway:3100/loki/api/v1/push -- tenant_id: tenant1 -- --scrape_configs: -- - job_name: flog_scrape -- docker_sd_configs: -- - host: unix:///var/run/docker.sock -- refresh_interval: 5s -- relabel_configs: -- - source_labels: ['__meta_docker_container_name'] -- regex: '/(.*)' -- target_label: 'container' --",feat,"Update getting started demo to Loki 3.0 (#12723) - -Co-authored-by: J Stickler " -d0baa40df3f318e0192c6807d0009c86e0614164,2023-07-01 01:13:10,Hamish Forbes,"Only modify identical timestamps when increment-duplicate-timestamps enabled (#9437) - -When `-validation.increment-duplicate-timestamps` is enabled out of -order log entries within the same push request can have their timestamps -incorrectly mutated. - -The code is currently just looking for 2 consecutive entries where the -timestamp is not increasing. -It should only affect entries where the timestamp is a *duplicate* - -Test push request payload: - -```json -{ - ""streams"": [ - { - ""stream"": { - ""increment_duplicate_timestamp"": ""fixed|true|false"" - }, - ""values"": [ - [ - ""1683665111628000000"", - ""1a: 1683665111628000000"" - ], - [ - ""1683665111628000000"", - ""1b: 1683665111628000000"" - ], - [ - ""1683665111628000003"", - ""3: 1683665111628000003"" - ], - [ - ""1683665111628000002"", - ""2: 1683665111628000002"" - ] - ] - } - ] -} -```` - -As only the first 2 of these timestamps are the same I would expect all -but `1b` to have the timestamps recorded as-is and the logs returned by -a query in the order: 1a,1b,2,3. 
- -Actual Results: - -``` -> logcli query --forward --limit 100 -z UTC -q --no-labels --since 60m -o jsonl '{increment_duplicate_timestamp=""fixed""}' | jq -{ - ""line"": ""1a: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""1b: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628000001Z"" -} -{ - ""line"": ""2: 1683665111628000002"", - ""timestamp"": ""2023-05-09T20:45:11.628000002Z"" -} -{ - ""line"": ""3: 1683665111628000003"", - ""timestamp"": ""2023-05-09T20:45:11.628000003Z"" -} - -> logcli query --forward --limit 100 -z UTC -q --no-labels --since 60m -o jsonl '{increment_duplicate_timestamp=""true""}' | jq -{ - ""line"": ""1a: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""1b: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628000001Z"" -} -{ - ""line"": ""3: 1683665111628000003"", - ""timestamp"": ""2023-05-09T20:45:11.628000003Z"" -} -{ - ""line"": ""2: 1683665111628000002"", - ""timestamp"": ""2023-05-09T20:45:11.628000004Z"" -} - -> logcli query --forward --limit 100 -z UTC -q --no-labels --since 60m -o jsonl '{increment_duplicate_timestamp=""false""}' | jq -{ - ""line"": ""1a: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""1b: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""2: 1683665111628000002"", - ""timestamp"": ""2023-05-09T20:45:11.628000002Z"" -} -{ - ""line"": ""3: 1683665111628000003"", - ""timestamp"": ""2023-05-09T20:45:11.628000003Z"" -} -``` - -`true` and `false` were written to Loki 2.8.2 with the -`-validation.increment-duplicate-timestamps` enabled or disabled -`fixed` with this patch applied and the flag enabled - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [x] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`",False,"diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index 7c76112938e8d..fb39282e3cf19 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -336,6 +336,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log - - n := 0 - pushSize := 0 -+ prevTs := stream.Entries[0].Timestamp - for _, entry := range stream.Entries { - if err := d.validator.ValidateEntry(validationContext, stream.Labels, entry); err != nil { - d.writeFailuresManager.Log(tenantID, err) -@@ -348,12 +349,15 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log - // If configured for this tenant, increment duplicate timestamps. Note, this is imperfect - // since Loki will accept out of order writes it doesn't account for separate - // pushes with overlapping time ranges having entries with duplicate timestamps -+ - if validationContext.incrementDuplicateTimestamps && n != 0 { - // Traditional logic for Loki is that 2 lines with the same timestamp and - // exact same content will be de-duplicated, (i.e. 
only one will be stored, others dropped) - // To maintain this behavior, only increment the timestamp if the log content is different -- if stream.Entries[n-1].Line != entry.Line { -- stream.Entries[n].Timestamp = maxT(entry.Timestamp, stream.Entries[n-1].Timestamp.Add(1*time.Nanosecond)) -+ if stream.Entries[n-1].Line != entry.Line && (entry.Timestamp == prevTs || entry.Timestamp == stream.Entries[n-1].Timestamp) { -+ stream.Entries[n].Timestamp = stream.Entries[n-1].Timestamp.Add(1 * time.Nanosecond) -+ } else { -+ prevTs = entry.Timestamp - } - } - -diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go -index 90ae38808a95f..476df6ca99c40 100644 ---- a/pkg/distributor/distributor_test.go -+++ b/pkg/distributor/distributor_test.go -@@ -358,6 +358,34 @@ func Test_IncrementTimestamp(t *testing.T) { - }, - }, - }, -+ ""incrementing enabled, no dupes, out of order"": { -+ limits: incrementingEnabled, -+ push: &logproto.PushRequest{ -+ Streams: []logproto.Stream{ -+ { -+ Labels: ""{job=\""foo\""}"", -+ Entries: []logproto.Entry{ -+ {Timestamp: time.Unix(123456, 0), Line: ""hey1""}, -+ {Timestamp: time.Unix(123458, 0), Line: ""hey3""}, -+ {Timestamp: time.Unix(123457, 0), Line: ""hey2""}, -+ }, -+ }, -+ }, -+ }, -+ expectedPush: &logproto.PushRequest{ -+ Streams: []logproto.Stream{ -+ { -+ Labels: ""{job=\""foo\""}"", -+ Hash: 0x8eeb87f5eb220480, -+ Entries: []logproto.Entry{ -+ {Timestamp: time.Unix(123456, 0), Line: ""hey1""}, -+ {Timestamp: time.Unix(123458, 0), Line: ""hey3""}, -+ {Timestamp: time.Unix(123457, 0), Line: ""hey2""}, -+ }, -+ }, -+ }, -+ }, -+ }, - } - - for testName, testData := range tests {",unknown,"Only modify identical timestamps when increment-duplicate-timestamps enabled (#9437) - -When `-validation.increment-duplicate-timestamps` is enabled out of -order log entries within the same push request can have their timestamps -incorrectly mutated. - -The code is currently just looking for 2 consecutive entries where the -timestamp is not increasing. -It should only affect entries where the timestamp is a *duplicate* - -Test push request payload: - -```json -{ - ""streams"": [ - { - ""stream"": { - ""increment_duplicate_timestamp"": ""fixed|true|false"" - }, - ""values"": [ - [ - ""1683665111628000000"", - ""1a: 1683665111628000000"" - ], - [ - ""1683665111628000000"", - ""1b: 1683665111628000000"" - ], - [ - ""1683665111628000003"", - ""3: 1683665111628000003"" - ], - [ - ""1683665111628000002"", - ""2: 1683665111628000002"" - ] - ] - } - ] -} -```` - -As only the first 2 of these timestamps are the same I would expect all -but `1b` to have the timestamps recorded as-is and the logs returned by -a query in the order: 1a,1b,2,3. 
- -Actual Results: - -``` -> logcli query --forward --limit 100 -z UTC -q --no-labels --since 60m -o jsonl '{increment_duplicate_timestamp=""fixed""}' | jq -{ - ""line"": ""1a: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""1b: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628000001Z"" -} -{ - ""line"": ""2: 1683665111628000002"", - ""timestamp"": ""2023-05-09T20:45:11.628000002Z"" -} -{ - ""line"": ""3: 1683665111628000003"", - ""timestamp"": ""2023-05-09T20:45:11.628000003Z"" -} - -> logcli query --forward --limit 100 -z UTC -q --no-labels --since 60m -o jsonl '{increment_duplicate_timestamp=""true""}' | jq -{ - ""line"": ""1a: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""1b: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628000001Z"" -} -{ - ""line"": ""3: 1683665111628000003"", - ""timestamp"": ""2023-05-09T20:45:11.628000003Z"" -} -{ - ""line"": ""2: 1683665111628000002"", - ""timestamp"": ""2023-05-09T20:45:11.628000004Z"" -} - -> logcli query --forward --limit 100 -z UTC -q --no-labels --since 60m -o jsonl '{increment_duplicate_timestamp=""false""}' | jq -{ - ""line"": ""1a: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""1b: 1683665111628000000"", - ""timestamp"": ""2023-05-09T20:45:11.628Z"" -} -{ - ""line"": ""2: 1683665111628000002"", - ""timestamp"": ""2023-05-09T20:45:11.628000002Z"" -} -{ - ""line"": ""3: 1683665111628000003"", - ""timestamp"": ""2023-05-09T20:45:11.628000003Z"" -} -``` - -`true` and `false` were written to Loki 2.8.2 with the -`-validation.increment-duplicate-timestamps` enabled or disabled -`fixed` with this patch applied and the flag enabled - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [x] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`" -0e329e18cc17e6b6675e0379ea0e924198fe5711,2025-01-11 00:06:40,renovate[bot],"fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.72.2 (#15685) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod -index 1940e75373d53..64bf6b8636a8d 100644 ---- a/tools/lambda-promtail/go.mod -+++ b/tools/lambda-promtail/go.mod -@@ -6,7 +6,7 @@ require ( - github.com/aws/aws-lambda-go v1.47.0 - github.com/aws/aws-sdk-go-v2 v1.32.8 - github.com/aws/aws-sdk-go-v2/config v1.28.9 -- github.com/aws/aws-sdk-go-v2/service/s3 v1.72.1 -+ github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 - github.com/go-kit/log v0.2.1 - github.com/gogo/protobuf v1.3.2 - github.com/golang/snappy v0.0.4 -@@ -30,11 +30,11 @@ require ( - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect -- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26 // indirect -+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect -- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7 // indirect -+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect -- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 // indirect -+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.5 // indirect -diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum -index 29032116eef76..19664aa925de3 100644 ---- a/tools/lambda-promtail/go.sum -+++ b/tools/lambda-promtail/go.sum -@@ -64,18 +64,18 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 h1:l+X4K77Dui85pIj5fo - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27/go.mod h1:KvZXSFEXm6x84yE8qffKvT3x8J5clWnVFXphpohhzJ8= - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= --github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26 h1:GeNJsIFHB+WW5ap2Tec4K6dzcVTsRbsT1Lra46Hv9ME= --github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26/go.mod h1:zfgMpwHDXX2WGoG84xG2H+ZlPTkJUU4YUvx2svLQYWo= -+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 h1:AmB5QxnD+fBFrg9LcqzkgF/CaYvMyU/BTlejG4t1S7Q= -+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27/go.mod h1:Sai7P3xTiyv9ZUYO3IFxMnmiIP759/67iQbU4kdmkyU= - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= --github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7 h1:tB4tNw83KcajNAzaIMhkhVI2Nt8fAZd5A5ro113FEMY= --github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7/go.mod h1:lvpyBGkZ3tZ9iSsUIcC2EWp+0ywa7aK3BLT+FwZi+mQ= -+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 h1:iwYS40JnrBeA9e9aI5S6KKN4EB2zR4iUVYN0nwVivz4= -+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8/go.mod h1:Fm9Mi+ApqmFiknZtGpohVcBGvpTu542VC4XO9YudRi0= - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 h1:cWno7lefSH6Pp+mSznagKCgfDGeZRin66UvYUqAkyeA= - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8/go.mod h1:tPD+VjU3ABTBoEJ3nctu5Nyg4P4yjqSH5bJGGkY4+XE= --github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 h1:Hi0KGbrnr57bEHWM0bJ1QcBzxLrL/k2DHvGYhb8+W1w= --github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7/go.mod h1:wKNgWgExdjjrm4qvfbTorkvocEstaoDl4WCvGfeCy9c= --github.com/aws/aws-sdk-go-v2/service/s3 v1.72.1 h1:+IrM0EXV6ozLqJs3Kq2iwQGJBWmgRiYBXWETQQUMZRY= --github.com/aws/aws-sdk-go-v2/service/s3 v1.72.1/go.mod h1:r+xl5yzMk9083rMR+sJ5TYj9Tihvf/l1oxzZXDgGj2Q= -+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 h1:/Mn7gTedG86nbpjT4QEKsN1D/fThiYe1qvq7WsBGNHg= -+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8/go.mod h1:Ae3va9LPmvjj231ukHB6UeT8nS7wTPfC3tMZSZMwNYg= -+github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 h1:a7aQ3RW+ug4IbhoQp29NZdc7vqrzKZZfWZSaQAXOZvQ= -+github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2/go.mod h1:xMekrnhmJ5aqmyxtmALs7mlvXw5xRh+eYjOjvrIIFJ4= - github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 h1:YqtxripbjWb2QLyzRK9pByfEDvgg95gpC2AyDq4hFE8= - github.com/aws/aws-sdk-go-v2/service/sso v1.24.9/go.mod h1:lV8iQpg6OLOfBnqbGMBKYjilBlf633qwHnBEiMSPoHY= - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 
h1:6dBT1Lz8fK11m22R+AqfRsFn8320K0T5DTGxxOQBSMw=",fix,"update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.72.2 (#15685) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -1bb535a730856bc992493c94991765ada682e4bd,2022-08-30 13:44:03,Gerard Vanloo,operator: Fixing logcli pod image value for operator addons (#6997),False,"diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml -index 3897e5a24e2b0..11f92bf7d6f66 100644 ---- a/operator/hack/addons_dev.yaml -+++ b/operator/hack/addons_dev.yaml -@@ -29,7 +29,7 @@ spec: - spec: - containers: - - name: logcli -- image: docker.io/grafana/logcli:2.6.1 -+ image: docker.io/grafana/logcli:2.6.1-amd64 - imagePullPolicy: IfNotPresent - command: - - /bin/sh -diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml -index c13eadfb57e75..8f000d3c19ab1 100644 ---- a/operator/hack/addons_ocp.yaml -+++ b/operator/hack/addons_ocp.yaml -@@ -29,7 +29,7 @@ spec: - spec: - containers: - - name: logcli -- image: docker.io/grafana/logcli:2.6.1 -+ image: docker.io/grafana/logcli:2.6.1-amd64 - imagePullPolicy: IfNotPresent - command: - - /bin/sh",operator,Fixing logcli pod image value for operator addons (#6997) -760c255431bec88173112a4d69eb935602dab6c1,2023-08-25 21:57:17,Kaviraj Kanagaraj,"doc(schema): Start recommending `v12` schema instead of `v11` (#10355) - -**What this PR does / why we need it**: -`v12` is stable schema with more efficient/stable chunks and index -formats. Start recommending it instead of old `v11`. - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added - ---------- - -Signed-off-by: Kaviraj ",False,"diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md -index 1821c673865cd..179b30c046873 100644 ---- a/docs/sources/configure/_index.md -+++ b/docs/sources/configure/_index.md -@@ -4137,7 +4137,7 @@ The `period_config` block configures what index schemas should be used for from - # If omitted, defaults to the same value as store. - [object_store: | default = """"] - --# The schema version to use, current recommended schema is v11. -+# The schema version to use, current recommended schema is v12. - [schema: | default = """"] - - # Configures how the index is updated and stored. -diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go -index f035e79f06132..5bf215049d48e 100644 ---- a/pkg/storage/config/schema_config.go -+++ b/pkg/storage/config/schema_config.go -@@ -159,7 +159,7 @@ type PeriodConfig struct { - IndexType string `yaml:""store"" doc:""description=store and object_store below affect which key is used.\nWhich store to use for the index. Either aws, aws-dynamo, gcp, bigtable, bigtable-hashed, cassandra, boltdb or boltdb-shipper. ""` - // type of object client to use; if omitted, defaults to store. - ObjectType string `yaml:""object_store"" doc:""description=Which store to use for the chunks. Either aws, azure, gcp, bigtable, gcs, cassandra, swift, filesystem or a named_store (refer to named_stores_config). 
If omitted, defaults to the same value as store.""` -- Schema string `yaml:""schema"" doc:""description=The schema version to use, current recommended schema is v11.""` -+ Schema string `yaml:""schema"" doc:""description=The schema version to use, current recommended schema is v12.""` - IndexTables PeriodicTableConfig `yaml:""index"" doc:""description=Configures how the index is updated and stored.""` - ChunkTables PeriodicTableConfig `yaml:""chunks"" doc:""description=Configured how the chunks are updated and stored.""` - RowShards uint32 `yaml:""row_shards"" doc:""description=How many shards will be created. Only used if schema is v10 or greater.""`",doc,"Start recommending `v12` schema instead of `v11` (#10355) - -**What this PR does / why we need it**: -`v12` is stable schema with more efficient/stable chunks and index -formats. Start recommending it instead of old `v11`. - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added - ---------- - -Signed-off-by: Kaviraj " -bc9bbb8a01393f2caa54a907bee5760f6b348629,2025-03-03 02:22:31,renovate[bot],"fix(deps): update module github.com/ibm/sarama to v1.45.1 (main) (#16522) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 070209a04e12e..7eebf6b76a3a2 100644 ---- a/go.mod -+++ b/go.mod -@@ -13,7 +13,7 @@ require ( - github.com/Azure/azure-storage-blob-go v0.15.0 - github.com/Azure/go-autorest/autorest/adal v0.9.24 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 -- github.com/IBM/sarama v1.45.0 -+ github.com/IBM/sarama v1.45.1 - github.com/Masterminds/sprig/v3 v3.3.0 - github.com/NYTimes/gziphandler v1.1.1 - github.com/Workiva/go-datastructures v1.1.5 -diff --git a/go.sum b/go.sum -index 11bb813e574aa..fe065e8b85401 100644 ---- a/go.sum -+++ b/go.sum -@@ -138,8 +138,8 @@ github.com/IBM/go-sdk-core/v5 v5.18.5 h1:g0JRl3sYXJczB/yuDlrN6x22LJ6jIxhp0Sa4ARN - github.com/IBM/go-sdk-core/v5 v5.18.5/go.mod h1:KonTFRR+8ZSgw5cxBSYo6E4WZoY1+7n1kfHM82VcjFU= - github.com/IBM/ibm-cos-sdk-go v1.12.1 h1:pWs5c5/j9PNJE1lIQhYtzpdCxu2fpvCq9PHs6/nDjyI= - github.com/IBM/ibm-cos-sdk-go v1.12.1/go.mod h1:7vmUThyAq4+AD1eEyGZi90ir06Z9YhsEzLBsdGPfcqo= --github.com/IBM/sarama v1.45.0 h1:IzeBevTn809IJ/dhNKhP5mpxEXTmELuezO2tgHD9G5E= --github.com/IBM/sarama v1.45.0/go.mod h1:EEay63m8EZkeumco9TDXf2JT3uDnZsZqFgV46n4yZdY= -+github.com/IBM/sarama v1.45.1 h1:nY30XqYpqyXOXSNoe2XCgjj9jklGM1Ye94ierUb1jQ0= -+github.com/IBM/sarama v1.45.1/go.mod h1:qifDhA3VWSrQ1TjSMyxDl3nYL3oX2C83u+G6L79sq4w= - github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= - github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573 h1:DCPjdUAi+jcGnL7iN+A7uNY8xG584oMRuisYh/VE21E= - github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20240322194317-344980fda573/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= -diff --git a/vendor/github.com/IBM/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml -index 2e029401d881a..359c878817202 100644 ---- a/vendor/github.com/IBM/sarama/.golangci.yml -+++ b/vendor/github.com/IBM/sarama/.golangci.yml -@@ -1,17 +1,15 @@ -+# yaml-language-server: $schema=https://golangci-lint.run/jsonschema/golangci.jsonschema.json - run: -- go: ""1.20"" - timeout: 5m -- deadline: 10m 
- - linters-settings: - govet: -- check-shadowing: false -- golint: -- min-confidence: 0 -+ enable-all: true -+ disable: -+ - fieldalignment -+ - shadow - gocyclo: - min-complexity: 99 -- maligned: -- suggest-new: true - dupl: - threshold: 100 - goconst: -@@ -72,7 +70,6 @@ linters: - - govet - - misspell - - nilerr -- - staticcheck - - typecheck - - unconvert - - unused -diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml -index 1e64cc0d88ab2..2aa1fcb1a7e7a 100644 ---- a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml -+++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml -@@ -32,10 +32,10 @@ repos: - files: \.go$ - args: [] - - repo: https://github.com/gitleaks/gitleaks -- rev: v8.21.2 -+ rev: v8.23.3 - hooks: - - id: gitleaks - - repo: https://github.com/golangci/golangci-lint -- rev: v1.61.0 -+ rev: v1.63.4 - hooks: - - id: golangci-lint -diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka -index d2234e3918f2d..2b5a4cfe34e03 100644 ---- a/vendor/github.com/IBM/sarama/Dockerfile.kafka -+++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka -@@ -1,4 +1,4 @@ --FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5@sha256:daa61d6103e98bccf40d7a69a0d4f8786ec390e2204fd94f7cc49053e9949360 -+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5@sha256:3902bab19972cd054fd08b2a4e08612ae7e68861ee5d9a5cf22d828f27e2f479 - - USER root - -@@ -29,10 +29,11 @@ RUN --mount=type=bind,target=.,rw=true \ - && chmod a+rw ""/opt/kafka-${KAFKA_VERSION}"" \ - && if [ ""$KAFKA_VERSION"" = ""4.0.0"" ]; then \ - microdnf install -y java-17-openjdk-devel \ -- && git clone --depth=50 --single-branch -b 4.0 https://github.com/apache/kafka /usr/src/kafka \ -+ && mkdir -p /usr/src/kafka \ -+ && : PIN TO COMMIT OF 4.0 BRANCH BEFORE KAFKA-17616 ZOOKEEPER REMOVAL STARTED \ -+ && curl --fail -sSL https://github.com/apache/kafka/archive/d1504649fbe45064a0b0120ff33de9326b2fc662.tar.gz | \ -+ tar zxf - -C /usr/src/kafka --strip-components=1 \ - && cd /usr/src/kafka \ -- && : PIN TO COMMIT BEFORE KAFKA-17616 ZOOKEEPER REMOVAL STARTED \ -- && git reset --hard d1504649fb \ - && export JAVA_TOOL_OPTIONS=-XX:MaxRAMPercentage=80 \ - && sed -e '/version=/s/-SNAPSHOT//' -e '/org.gradle.jvmargs/d' -e '/org.gradle.parallel/s/true/false/' -i gradle.properties && ./gradlew -PmaxParallelForks=1 -PmaxScalacThreads=1 --no-daemon releaseTarGz -x siteDocsTar -x javadoc \ - && tar xzf core/build/distributions/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz --strip-components=1 -C ""/opt/kafka-${KAFKA_VERSION}"" \ -diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go -index 5f257524b6477..e34eed5445a7a 100644 ---- a/vendor/github.com/IBM/sarama/async_producer.go -+++ b/vendor/github.com/IBM/sarama/async_producer.go -@@ -16,9 +16,16 @@ import ( - // ErrProducerRetryBufferOverflow is returned when the bridging retry buffer is full and OOM prevention needs to be applied. - var ErrProducerRetryBufferOverflow = errors.New(""retry buffer full: message discarded to prevent buffer overflow"") - --// minFunctionalRetryBufferLength is the lower limit of Producer.Retry.MaxBufferLength for it to function. --// Any non-zero maxBufferLength but less than this lower limit is pushed to the lower limit. --const minFunctionalRetryBufferLength = 4 * 1024 -+const ( -+ // minFunctionalRetryBufferLength defines the minimum number of messages the retry buffer must support. 
-+ // If Producer.Retry.MaxBufferLength is set to a non-zero value below this limit, it will be adjusted to this value. -+ // This ensures the retry buffer remains functional under typical workloads. -+ minFunctionalRetryBufferLength = 4 * 1024 -+ // minFunctionalRetryBufferBytes defines the minimum total byte size the retry buffer must support. -+ // If Producer.Retry.MaxBufferBytes is set to a non-zero value below this limit, it will be adjusted to this value. -+ // A 32 MB lower limit ensures sufficient capacity for retrying larger messages without exhausting resources. -+ minFunctionalRetryBufferBytes = 32 * 1024 * 1024 -+) - - // AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages - // to the correct broker for the provided topic-partition, refreshing metadata as appropriate, -@@ -1214,11 +1221,22 @@ func (bp *brokerProducer) handleError(sent *produceSet, err error) { - // effectively a ""bridge"" between the flushers and the dispatcher in order to avoid deadlock - // based on https://godoc.org/github.com/eapache/channels#InfiniteChannel - func (p *asyncProducer) retryHandler() { -- maxBufferSize := p.conf.Producer.Retry.MaxBufferLength -- if 0 < maxBufferSize && maxBufferSize < minFunctionalRetryBufferLength { -- maxBufferSize = minFunctionalRetryBufferLength -+ maxBufferLength := p.conf.Producer.Retry.MaxBufferLength -+ if 0 < maxBufferLength && maxBufferLength < minFunctionalRetryBufferLength { -+ maxBufferLength = minFunctionalRetryBufferLength -+ } -+ -+ maxBufferBytes := p.conf.Producer.Retry.MaxBufferBytes -+ if 0 < maxBufferBytes && maxBufferBytes < minFunctionalRetryBufferBytes { -+ maxBufferBytes = minFunctionalRetryBufferBytes - } - -+ version := 1 -+ if p.conf.Version.IsAtLeast(V0_11_0_0) { -+ version = 2 -+ } -+ -+ var currentByteSize int64 - var msg *ProducerMessage - buf := queue.New() - -@@ -1229,7 +1247,8 @@ func (p *asyncProducer) retryHandler() { - select { - case msg = <-p.retries: - case p.input <- buf.Peek().(*ProducerMessage): -- buf.Remove() -+ msgToRemove := buf.Remove().(*ProducerMessage) -+ currentByteSize -= int64(msgToRemove.ByteSize(version)) - continue - } - } -@@ -1239,17 +1258,22 @@ func (p *asyncProducer) retryHandler() { - } - - buf.Add(msg) -+ currentByteSize += int64(msg.ByteSize(version)) - -- if maxBufferSize > 0 && buf.Length() >= maxBufferSize { -- msgToHandle := buf.Peek().(*ProducerMessage) -- if msgToHandle.flags == 0 { -- select { -- case p.input <- msgToHandle: -- buf.Remove() -- default: -- buf.Remove() -- p.returnError(msgToHandle, ErrProducerRetryBufferOverflow) -- } -+ if (maxBufferLength <= 0 || buf.Length() < maxBufferLength) && (maxBufferBytes <= 0 || currentByteSize < maxBufferBytes) { -+ continue -+ } -+ -+ msgToHandle := buf.Peek().(*ProducerMessage) -+ if msgToHandle.flags == 0 { -+ select { -+ case p.input <- msgToHandle: -+ buf.Remove() -+ currentByteSize -= int64(msgToHandle.ByteSize(version)) -+ default: -+ buf.Remove() -+ currentByteSize -= int64(msgToHandle.ByteSize(version)) -+ p.returnError(msgToHandle, ErrProducerRetryBufferOverflow) - } - } - } -diff --git a/vendor/github.com/IBM/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go -index b5bc30a13bdfc..5946c962f61f3 100644 ---- a/vendor/github.com/IBM/sarama/balance_strategy.go -+++ b/vendor/github.com/IBM/sarama/balance_strategy.go -@@ -989,7 +989,7 @@ func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicParti - return reversePairPartition - } - --//nolint:unused // this is used but only 
in unittests as a helper (which are excluded by the integration build tag) -+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag) - func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { - if src == dst { - return currentPath, false -@@ -1024,7 +1024,7 @@ func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, cur - return currentPath, false - } - --//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) -+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag) - func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { - superCycle := make([]string, len(cycle)-1) - for i := 0; i < len(cycle)-1; i++ { -@@ -1039,7 +1039,7 @@ func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { - return false - } - --//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) -+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag) - func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { - cycles := make([][]string, 0) - for _, pair := range pairs { -@@ -1071,7 +1071,7 @@ func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { - return false - } - --//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) -+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag) - func (p *partitionMovements) isSticky() bool { - for topic, movements := range p.PartitionMovementsByTopic { - movementPairs := make([]consumerPair, len(movements)) -@@ -1089,7 +1089,7 @@ func (p *partitionMovements) isSticky() bool { - return true - } - --//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) -+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag) - func indexOfSubList(source []string, target []string) int { - targetSize := len(target) - maxCandidate := len(source) - targetSize -diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go -index c4f1005f56e8b..5f99d242102c2 100644 ---- a/vendor/github.com/IBM/sarama/broker.go -+++ b/vendor/github.com/IBM/sarama/broker.go -@@ -1570,7 +1570,11 @@ func (b *Broker) createSaslAuthenticateRequest(msg []byte) *SaslAuthenticateRequ - func buildClientFirstMessage(token *AccessToken) ([]byte, error) { - var ext string - -- if token.Extensions != nil && len(token.Extensions) > 0 { -+ if token == nil { -+ return []byte{}, fmt.Errorf(""failed to build client first message: token is nil"") -+ } -+ -+ if len(token.Extensions) > 0 { - if _, ok := token.Extensions[SASLExtKeyAuth]; ok { - return []byte{}, fmt.Errorf(""the extension `%s` is invalid"", SASLExtKeyAuth) - } -diff --git a/vendor/github.com/IBM/sarama/client.go b/vendor/github.com/IBM/sarama/client.go -index 5c54b446130de..21d04000bfee1 100644 ---- a/vendor/github.com/IBM/sarama/client.go -+++ b/vendor/github.com/IBM/sarama/client.go -@@ -113,6 +113,9 @@ type Client interface { - // LeastLoadedBroker retrieves broker that has the least responses pending - LeastLoadedBroker() *Broker - -+ // check if partition is 
readable -+ PartitionNotReadable(topic string, partition int32) bool -+ - // Close shuts down all broker connections managed by this client. It is required - // to call this function before a client object passes out of scope, as it will - // otherwise leak memory. You must close any Producers or Consumers using a client -@@ -1283,3 +1286,14 @@ type nopCloserClient struct { - func (ncc *nopCloserClient) Close() error { - return nil - } -+ -+func (client *client) PartitionNotReadable(topic string, partition int32) bool { -+ client.lock.RLock() -+ defer client.lock.RUnlock() -+ -+ pm := client.metadata[topic][partition] -+ if pm == nil { -+ return true -+ } -+ return pm.Leader == -1 -+} -diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go -index 8c7c4c9853177..6a198dc899ec3 100644 ---- a/vendor/github.com/IBM/sarama/config.go -+++ b/vendor/github.com/IBM/sarama/config.go -@@ -276,6 +276,13 @@ type Config struct { - // Any value between 0 and 4096 is pushed to 4096. - // A zero or negative value indicates unlimited. - MaxBufferLength int -+ // The maximum total byte size of messages in the bridging buffer between `input` -+ // and `retries` channels in AsyncProducer#retryHandler. -+ // This limit prevents the buffer from consuming excessive memory. -+ // Defaults to 0 for unlimited. -+ // Any value between 0 and 32 MB is pushed to 32 MB. -+ // A zero or negative value indicates unlimited. -+ MaxBufferBytes int64 - } - - // Interceptors to be called when the producer dispatcher reads the -diff --git a/vendor/github.com/IBM/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go -index 53b64dd3b88c2..239da4619941f 100644 ---- a/vendor/github.com/IBM/sarama/consumer_group.go -+++ b/vendor/github.com/IBM/sarama/consumer_group.go -@@ -861,18 +861,32 @@ func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims - return nil, err - } - -- // start consuming -+ // start consuming each topic partition in its own goroutine - for topic, partitions := range claims { - for _, partition := range partitions { -- sess.waitGroup.Add(1) -- -+ sess.waitGroup.Add(1) // increment wait group before spawning goroutine - go func(topic string, partition int32) { - defer sess.waitGroup.Done() -- -- // cancel the as session as soon as the first -- // goroutine exits -+ // cancel the group session as soon as any of the consume calls return - defer sess.cancel() - -+ // if partition not currently readable, wait for it to become readable -+ if sess.parent.client.PartitionNotReadable(topic, partition) { -+ timer := time.NewTimer(5 * time.Second) -+ defer timer.Stop() -+ -+ for sess.parent.client.PartitionNotReadable(topic, partition) { -+ select { -+ case <-ctx.Done(): -+ return -+ case <-parent.closed: -+ return -+ case <-timer.C: -+ timer.Reset(5 * time.Second) -+ } -+ } -+ } -+ - // consume a single topic/partition, blocking - sess.consume(topic, partition) - }(topic, partition) -diff --git a/vendor/github.com/IBM/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go -index 004fc649039ec..c91403d7182b7 100644 ---- a/vendor/github.com/IBM/sarama/produce_set.go -+++ b/vendor/github.com/IBM/sarama/produce_set.go -@@ -164,9 +164,13 @@ func (ps *produceSet) buildRequest() *ProduceRequest { - rb := set.recordsToSend.RecordBatch - if len(rb.Records) > 0 { - rb.LastOffsetDelta = int32(len(rb.Records) - 1) -+ var maxTimestampDelta time.Duration - for i, record := range rb.Records { - record.OffsetDelta = int64(i) -+ maxTimestampDelta = 
max(maxTimestampDelta, record.TimestampDelta) - } -+ // Also set the MaxTimestamp similar to other clients. -+ rb.MaxTimestamp = rb.FirstTimestamp.Add(maxTimestampDelta) - } - - // Set the batch as transactional when a transactionalID is set -diff --git a/vendor/github.com/IBM/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go -index 3119baa6d7f60..f6876fbee02c0 100644 ---- a/vendor/github.com/IBM/sarama/sync_producer.go -+++ b/vendor/github.com/IBM/sarama/sync_producer.go -@@ -2,6 +2,12 @@ package sarama - - import ""sync"" - -+var expectationsPool = sync.Pool{ -+ New: func() interface{} { -+ return make(chan *ProducerError, 1) -+ }, -+} -+ - // SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct - // broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer - // to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. -@@ -110,11 +116,13 @@ func verifyProducerConfig(config *Config) error { - } - - func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { -- expectation := make(chan *ProducerError, 1) -+ expectation := expectationsPool.Get().(chan *ProducerError) - msg.expectation = expectation - sp.producer.Input() <- msg -- -- if pErr := <-expectation; pErr != nil { -+ pErr := <-expectation -+ msg.expectation = nil -+ expectationsPool.Put(expectation) -+ if pErr != nil { - return -1, -1, pErr.Err - } - -@@ -122,20 +130,24 @@ func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offs - } - - func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { -- expectations := make(chan chan *ProducerError, len(msgs)) -+ indices := make(chan int, len(msgs)) - go func() { -- for _, msg := range msgs { -- expectation := make(chan *ProducerError, 1) -+ for i, msg := range msgs { -+ expectation := expectationsPool.Get().(chan *ProducerError) - msg.expectation = expectation - sp.producer.Input() <- msg -- expectations <- expectation -+ indices <- i - } -- close(expectations) -+ close(indices) - }() - - var errors ProducerErrors -- for expectation := range expectations { -- if pErr := <-expectation; pErr != nil { -+ for i := range indices { -+ expectation := msgs[i].expectation -+ pErr := <-expectation -+ msgs[i].expectation = nil -+ expectationsPool.Put(expectation) -+ if pErr != nil { - errors = append(errors, pErr) - } - } -diff --git a/vendor/github.com/IBM/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go -index b0e1aceff14d4..83a992a5f3cac 100644 ---- a/vendor/github.com/IBM/sarama/utils.go -+++ b/vendor/github.com/IBM/sarama/utils.go -@@ -3,8 +3,15 @@ package sarama - import ( - ""bufio"" - ""fmt"" -+ ""math/rand"" - ""net"" - ""regexp"" -+ ""time"" -+) -+ -+const ( -+ defaultRetryBackoff = 100 * time.Millisecond -+ defaultRetryMaxBackoff = 1000 * time.Millisecond - ) - - type none struct{} -@@ -203,6 +210,7 @@ var ( - V3_6_2_0 = newKafkaVersion(3, 6, 2, 0) - V3_7_0_0 = newKafkaVersion(3, 7, 0, 0) - V3_7_1_0 = newKafkaVersion(3, 7, 1, 0) -+ V3_7_2_0 = newKafkaVersion(3, 7, 2, 0) - V3_8_0_0 = newKafkaVersion(3, 8, 0, 0) - V3_8_1_0 = newKafkaVersion(3, 8, 1, 0) - V3_9_0_0 = newKafkaVersion(3, 9, 0, 0) -@@ -275,6 +283,7 @@ var ( - V3_6_2_0, - V3_7_0_0, - V3_7_1_0, -+ V3_7_2_0, - V3_8_0_0, - V3_8_1_0, - V3_9_0_0, -@@ -342,3 +351,39 @@ func (v KafkaVersion) String() string { - - return fmt.Sprintf(""%d.%d.%d"", v.version[0], v.version[1], 
v.version[2]) - } -+ -+// NewExponentialBackoff returns a function that implements an exponential backoff strategy with jitter. -+// It follows KIP-580, implementing the formula: -+// MIN(retry.backoff.max.ms, (retry.backoff.ms * 2**(failures - 1)) * random(0.8, 1.2)) -+// This ensures retries start with `backoff` and exponentially increase until `maxBackoff`, with added jitter. -+// The behavior when `failures = 0` is not explicitly defined in KIP-580 and is left to implementation discretion. -+// -+// Example usage: -+// -+// backoffFunc := sarama.NewExponentialBackoff(config.Producer.Retry.Backoff, 2*time.Second) -+// config.Producer.Retry.BackoffFunc = backoffFunc -+func NewExponentialBackoff(backoff time.Duration, maxBackoff time.Duration) func(retries, maxRetries int) time.Duration { -+ if backoff <= 0 { -+ backoff = defaultRetryBackoff -+ } -+ if maxBackoff <= 0 { -+ maxBackoff = defaultRetryMaxBackoff -+ } -+ -+ if backoff > maxBackoff { -+ Logger.Println(""Warning: backoff is greater than maxBackoff, using maxBackoff instead."") -+ backoff = maxBackoff -+ } -+ -+ return func(retries, maxRetries int) time.Duration { -+ if retries <= 0 { -+ return backoff -+ } -+ -+ calculatedBackoff := backoff * time.Duration(1<<(retries-1)) -+ jitter := 0.8 + 0.4*rand.Float64() -+ calculatedBackoff = time.Duration(float64(calculatedBackoff) * jitter) -+ -+ return min(calculatedBackoff, maxBackoff) -+ } -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 4e82361106232..504e5795d45dc 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -263,7 +263,7 @@ github.com/IBM/ibm-cos-sdk-go/private/protocol/restxml - github.com/IBM/ibm-cos-sdk-go/private/protocol/xml/xmlutil - github.com/IBM/ibm-cos-sdk-go/service/s3 - github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface --# github.com/IBM/sarama v1.45.0 -+# github.com/IBM/sarama v1.45.1 - ## explicit; go 1.21 - github.com/IBM/sarama - # github.com/Masterminds/goutils v1.1.1",fix,"update module github.com/ibm/sarama to v1.45.1 (main) (#16522) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -88721889f5a8facb4d0314bc3f564102aa9ffe9d,2021-09-09 05:57:43,Bernhard Millauer,"Correct the indention for azure configuration (#4283) - -The snippet part for storage_config was indented into schema_config which is wrong.",False,"diff --git a/docs/sources/storage/_index.md b/docs/sources/storage/_index.md -index 5d254ddba97c2..9c6ab51236684 100644 ---- a/docs/sources/storage/_index.md -+++ b/docs/sources/storage/_index.md -@@ -298,20 +298,20 @@ schema_config: - object_store: azure - schema: v11 - store: boltdb-shipper -- storage_config: -- azure: -- # For the account-key, see docs: https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal -- account_key: -- # Your azure account name -- account_name: -- # See https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction#containers -- container_name: -- request_timeout: 0 -- boltdb_shipper: -- active_index_directory: /data/loki/boltdb-shipper-active -- cache_location: /data/loki/boltdb-shipper-cache -- cache_ttl: 24h -- shared_store: azure -- filesystem: -- directory: /data/loki/chunks --``` -\ No newline at end of file -+storage_config: -+ azure: -+ # For the account-key, see docs: https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal -+ account_key: -+ # Your azure account name -+ account_name: -+ # See 
https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction#containers -+ container_name: -+ request_timeout: 0 -+ boltdb_shipper: -+ active_index_directory: /data/loki/boltdb-shipper-active -+ cache_location: /data/loki/boltdb-shipper-cache -+ cache_ttl: 24h -+ shared_store: azure -+ filesystem: -+ directory: /data/loki/chunks -+```",unknown,"Correct the indention for azure configuration (#4283) - -The snippet part for storage_config was indented into schema_config which is wrong." -6c49cc07305e823fc0f405f515b39e3ddc649303,2024-12-20 14:07:00,Semir Ajruli,"fix(ci): Revert ""fixed `Publish Rendered Helm Chart Diff` workflow"" (#15506)",False,"diff --git a/.github/workflows/helm-loki-ci.yml b/.github/workflows/helm-loki-ci.yml -index 6c951cb689ff4..7ecac70227211 100644 ---- a/.github/workflows/helm-loki-ci.yml -+++ b/.github/workflows/helm-loki-ci.yml -@@ -1,14 +1,14 @@ - --- - name: helm-loki-ci - on: -- # It runs with the configuration from base branch, so the changes of this file from the PR won't be taken into account until they are merged into main. see: https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#pull_request_target . -- # This change is required to allow this CI to be run on Pull Requests opened from a fork repository -- pull_request_target: -+ pull_request: - paths: - - ""production/helm/loki/**"" - - jobs: - publish-diff: -+ # temporarily disable the workflow for the PRs where PRs branch is from fork. -+ if: github.event.pull_request.head.repo.full_name == github.repository - name: Publish Rendered Helm Chart Diff - runs-on: ubuntu-latest - steps:",fix,"Revert ""fixed `Publish Rendered Helm Chart Diff` workflow"" (#15506)" -cd8bb0f148dfa8cfdfe8e5a67be2ccc711452658,2018-06-25 13:15:05,Tom Wilkie,"Only put the http middleware on specific routes, we don't want auth on /metrics. - -Signed-off-by: Tom Wilkie ",False,"diff --git a/cmd/querier/main.go b/cmd/querier/main.go -index 43a2654ff57ae..4dfa5e32ebd88 100644 ---- a/cmd/querier/main.go -+++ b/cmd/querier/main.go -@@ -31,12 +31,6 @@ func main() { - GRPCMiddleware: []grpc.UnaryServerInterceptor{ - middleware.ServerUserHeaderInterceptor, - }, -- HTTPMiddleware: []middleware.Interface{ -- middleware.Func(func(handler http.Handler) http.Handler { -- return nethttp.Middleware(opentracing.GlobalTracer(), handler, operationNameFunc) -- }), -- middleware.AuthenticateUser, -- }, - } - ringConfig ring.Config - querierConfig querier.Config -@@ -66,7 +60,14 @@ func main() { - } - defer server.Shutdown() - -- server.HTTP.Handle(""/api/prom/query"", http.HandlerFunc(querier.QueryHandler)) -- server.HTTP.Handle(""/api/prom/label/{name}/values"", http.HandlerFunc(querier.LabelHandler)) -+ httpMiddleware := middleware.Merge( -+ middleware.Func(func(handler http.Handler) http.Handler { -+ return nethttp.Middleware(opentracing.GlobalTracer(), handler, operationNameFunc) -+ }), -+ middleware.AuthenticateUser, -+ ) -+ -+ server.HTTP.Handle(""/api/prom/query"", httpMiddleware.Wrap(http.HandlerFunc(querier.QueryHandler))) -+ server.HTTP.Handle(""/api/prom/label/{name}/values"", httpMiddleware.Wrap(http.HandlerFunc(querier.LabelHandler))) - server.Run() - }",unknown,"Only put the http middleware on specific routes, we don't want auth on /metrics. 
- -Signed-off-by: Tom Wilkie " -685c898b9532e5e3d85dc7fe418764983d7cdfa9,2022-07-19 23:39:34,Gerard Vanloo,operator: Addons work in restricted policy (#6564),False,"diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml -index 6519eb60fc249..f9781b46f6c89 100644 ---- a/operator/hack/addons_dev.yaml -+++ b/operator/hack/addons_dev.yaml -@@ -42,7 +42,7 @@ spec: - value: /var/run/secrets/kubernetes.io/serviceaccount/token - args: - - -c -- - while true; do logcli query '{job=""systemd-journal""}'; sleep 30; done -+ - while true; do logcli query '{namespace=""default""}'; sleep 30; done - securityContext: - allowPrivilegeEscalation: false - capabilities: -@@ -50,6 +50,7 @@ spec: - - ALL - serviceAccountName: lokistack-dev-addons-logcli - securityContext: -+ runAsUser: 10002 - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault -@@ -76,12 +77,17 @@ spec: - args: - - -config.file=/etc/promtail/promtail.yaml - - -log.level=info -+ env: -+ - name: 'HOSTNAME' -+ valueFrom: -+ fieldRef: -+ fieldPath: 'spec.nodeName' - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /etc/promtail - name: config -- - mountPath: /run/promtail -+ - mountPath: /tmp/promtail - name: run - - mountPath: /var/lib/docker/containers - name: docker -@@ -93,34 +99,39 @@ spec: - name: journal - readOnly: true - securityContext: -- allowPrivilegeEscalation: false -+ privileged: true -+ runAsNonRoot: false -+ readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - serviceAccountName: lokistack-dev-addons-promtail - securityContext: -+ runAsUser: 10002 - runAsNonRoot: true -+ seccompProfile: -+ type: RuntimeDefault - volumes: -- - configMap: -+ - name: config -+ configMap: - defaultMode: 420 - name: lokistack-dev-addons-promtail -- name: config -- - emptyDir: -- medium: """" -- sizeLimit: 10G -- name: run -- - emptyDir: -+ - name: run -+ emptyDir: - medium: """" -- sizeLimit: 10G -- name: docker -- - emptyDir: -- medium: """" -- sizeLimit: 10G -- name: pods -- - emptyDir: -- medium: """" -- sizeLimit: 10G -- name: journal -+ sizeLimit: 5Gi -+ - name: docker -+ hostPath: -+ path: /var/lib/docker/containers -+ type: """" -+ - name: pods -+ hostPath: -+ path: /var/log/pods -+ type: """" -+ - name: journal -+ hostPath: -+ path: /var/log/journal -+ type: """" - --- - apiVersion: v1 - kind: ConfigMap -@@ -140,7 +151,7 @@ data: - batchwait: 10s - timeout: 10s - positions: -- filename: /run/promtail/positions.yaml -+ filename: /tmp/promtail/positions.yaml - server: - http_listen_port: 3100 - grpc_listen_port: 9095 -@@ -409,21 +420,6 @@ data: - target_label: __path__ - --- - apiVersion: rbac.authorization.k8s.io/v1 --kind: RoleBinding --metadata: -- name: lokistack-dev-addons-writer -- labels: -- app.kubernetes.io/name: promtail -- app.kubernetes.io/instance: developer-addons --roleRef: -- apiGroup: rbac.authorization.k8s.io -- kind: Role -- name: lokistack-dev-addons-writer --subjects: --- kind: ServiceAccount -- name: lokistack-dev-addons-promtail ----- --apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: lokistack-dev-addons-reader -diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml -index 208a747e69e3f..fde24765b2eab 100644 ---- a/operator/hack/addons_ocp.yaml -+++ b/operator/hack/addons_ocp.yaml -@@ -42,7 +42,7 @@ spec: - value: /var/run/secrets/kubernetes.io/serviceaccount/token - args: - - -c -- - while true; do logcli 
--ca-cert=""/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"" query '{job=""systemd-journal""}'; sleep 30; done -+ - while true; do logcli --ca-cert=""/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"" query '{namespace=""openshift-logging""}'; sleep 30; done - securityContext: - allowPrivilegeEscalation: false - capabilities: -@@ -74,12 +74,17 @@ spec: - args: - - -config.file=/etc/promtail/promtail.yaml - - -log.level=info -+ env: -+ - name: 'HOSTNAME' -+ valueFrom: -+ fieldRef: -+ fieldPath: 'spec.nodeName' - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /etc/promtail - name: config -- - mountPath: /run/promtail -+ - mountPath: /tmp/promtail - name: run - - mountPath: /var/lib/docker/containers - name: docker -@@ -91,7 +96,9 @@ spec: - name: journal - readOnly: true - securityContext: -- allowPrivilegeEscalation: false -+ privileged: true -+ runAsNonRoot: false -+ readOnlyRootFilesystem: true - capabilities: - drop: - - ALL -@@ -99,26 +106,26 @@ spec: - securityContext: - runAsNonRoot: true - volumes: -- - configMap: -+ - name: config -+ configMap: - defaultMode: 420 - name: lokistack-dev-addons-promtail -- name: config -- - emptyDir: -- medium: """" -- sizeLimit: 10G -- name: run -- - emptyDir: -- medium: """" -- sizeLimit: 10G -- name: docker -- - emptyDir: -+ - name: run -+ emptyDir: - medium: """" -- sizeLimit: 10G -- name: pods -- - emptyDir: -- medium: """" -- sizeLimit: 10G -- name: journal -+ sizeLimit: 5Gi -+ - name: docker -+ hostPath: -+ path: /var/lib/docker/containers -+ type: """" -+ - name: pods -+ hostPath: -+ path: /var/log/pods -+ type: """" -+ - name: journal -+ hostPath: -+ path: /var/log/journal -+ type: """" - --- - apiVersion: v1 - kind: ConfigMap -@@ -140,7 +147,7 @@ data: - batchwait: 10s - timeout: 10s - positions: -- filename: /run/promtail/positions.yaml -+ filename: /tmp/promtail/positions.yaml - server: - http_listen_port: 3100 - grpc_listen_port: 9095 -@@ -409,6 +416,20 @@ data: - target_label: __path__ - --- - apiVersion: rbac.authorization.k8s.io/v1 -+kind: Role -+metadata: -+ name: lokistack-dev-addons-writer -+rules: -+- apiGroups: -+ - security.openshift.io -+ resourceNames: -+ - privileged -+ resources: -+ - securitycontextconstraints -+ verbs: -+ - use -+--- -+apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: lokistack-dev-addons-writer",operator,Addons work in restricted policy (#6564) -b353acf6e229fee1a74fd2d199cb2b58b109c2cb,2024-12-13 18:59:28,Ashwanth,chore(block-scheduler): add scheduler grpc methods to auth mw ignore list (#15388),False,"diff --git a/pkg/blockbuilder/types/grpc_transport.go b/pkg/blockbuilder/types/grpc_transport.go -index 4d52bdfc7745e..b53fdeeb4a7d4 100644 ---- a/pkg/blockbuilder/types/grpc_transport.go -+++ b/pkg/blockbuilder/types/grpc_transport.go -@@ -6,6 +6,9 @@ import ( - - ""github.com/grafana/dskit/grpcclient"" - ""github.com/grafana/dskit/instrument"" -+ ""github.com/grafana/dskit/middleware"" -+ otgrpc ""github.com/opentracing-contrib/go-grpc"" -+ ""github.com/opentracing/opentracing-go"" - ""github.com/pkg/errors"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/client_golang/prometheus/promauto"" -@@ -47,7 +50,16 @@ func NewGRPCTransportFromAddress( - cfg grpcclient.Config, - reg prometheus.Registerer, - ) (*GRPCTransport, error) { -- dialOpts, err := cfg.DialOption(grpcclient.Instrument(newGRPCTransportMetrics(reg).requestLatency)) -+ metrics := 
newGRPCTransportMetrics(reg) -+ dialOpts, err := cfg.DialOption( -+ []grpc.UnaryClientInterceptor{ -+ otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), -+ middleware.UnaryClientInstrumentInterceptor(metrics.requestLatency), -+ }, []grpc.StreamClientInterceptor{ -+ otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), -+ middleware.StreamClientInstrumentInterceptor(metrics.requestLatency), -+ }, -+ ) - if err != nil { - return nil, err - } -diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go -index 9747a8f231f7e..14963c09940c4 100644 ---- a/pkg/loki/loki.go -+++ b/pkg/loki/loki.go -@@ -430,6 +430,9 @@ func (t *Loki) setupAuthMiddleware() { - ""/schedulerpb.SchedulerForFrontend/FrontendLoop"", - ""/schedulerpb.SchedulerForQuerier/QuerierLoop"", - ""/schedulerpb.SchedulerForQuerier/NotifyQuerierShutdown"", -+ ""/blockbuilder.types.SchedulerService/GetJob"", -+ ""/blockbuilder.types.SchedulerService/CompleteJob"", -+ ""/blockbuilder.types.SchedulerService/SyncJob"", - }) - }",chore,add scheduler grpc methods to auth mw ignore list (#15388) -fc68205cdc0b891b64d3393d60b281cfb3a202b5,2025-03-13 22:10:04,George Robinson,chore: add tests for checking limits in distributor (#16741),False,"diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index a670e6355f8b7..f44de21a64e9f 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -696,7 +696,7 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe - } - - if d.cfg.IngestLimitsEnabled { -- exceedsLimits, _, err := d.exceedsLimits(ctx, tenantID, streams) -+ exceedsLimits, _, err := d.exceedsLimits(ctx, tenantID, streams, d.doExceedsLimitsRPC) - if err != nil { - level.Error(d.logger).Log(""msg"", ""failed to check if request exceeds limits, request has been accepted"", ""err"", err) - } -@@ -1159,11 +1159,12 @@ func (d *Distributor) exceedsLimits( - ctx context.Context, - tenantID string, - streams []KeyedStream, -+ doExceedsLimitsFn doExceedsLimitsFunc, - ) (bool, []string, error) { - if !d.cfg.IngestLimitsEnabled { - return false, nil, nil - } -- resp, err := d.doExceedsLimitsRPC(ctx, tenantID, streams) -+ resp, err := doExceedsLimitsFn(ctx, tenantID, streams) - if err != nil { - return false, nil, err - } -@@ -1187,6 +1188,13 @@ func (d *Distributor) exceedsLimits( - return true, reasons, nil - } - -+// doExceedsLimitsFunc enables stubbing out doExceedsLimitsRPC for tests. -+type doExceedsLimitsFunc func( -+ ctx context.Context, -+ tenantID string, -+ streams []KeyedStream, -+) (*logproto.ExceedsLimitsResponse, error) -+ - // doExceedsLimitsRPC executes an RPC to the limits-frontend service to check - // if per-tenant limits have been exceeded. If an RPC call returns an error, - // it failsover to the next limits-frontend service. 
The failover is repeated -diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go -index 545e7813288b6..31fecccad98ee 100644 ---- a/pkg/distributor/distributor_test.go -+++ b/pkg/distributor/distributor_test.go -@@ -2,6 +2,7 @@ package distributor - - import ( - ""context"" -+ ""errors"" - ""fmt"" - ""math"" - ""math/rand"" -@@ -2387,3 +2388,67 @@ func TestRequestScopedStreamResolver(t *testing.T) { - policy = newResolver.PolicyFor(labels.FromStrings(""env"", ""dev"")) - require.Equal(t, ""policy1"", policy) - } -+ -+func TestExceedsLimits(t *testing.T) { -+ limits := &validation.Limits{} -+ flagext.DefaultValues(limits) -+ distributors, _ := prepare(t, 1, 0, limits, nil) -+ d := distributors[0] -+ -+ ctx := context.Background() -+ streams := []KeyedStream{{ -+ HashKeyNoShard: 1, -+ Stream: logproto.Stream{ -+ Labels: ""{foo=\""bar\""}"", -+ }, -+ }} -+ -+ t.Run(""no limits should be checked when disabled"", func(t *testing.T) { -+ d.cfg.IngestLimitsEnabled = false -+ doExceedsLimitsFn := func(_ context.Context, _ string, _ []KeyedStream) (*logproto.ExceedsLimitsResponse, error) { -+ t.Fail() // Should not be called. -+ return nil, nil -+ } -+ exceedsLimits, reasons, err := d.exceedsLimits(ctx, ""test"", streams, doExceedsLimitsFn) -+ require.Nil(t, err) -+ require.False(t, exceedsLimits) -+ require.Nil(t, reasons) -+ }) -+ -+ t.Run(""error should be returned if limits cannot be checked"", func(t *testing.T) { -+ d.cfg.IngestLimitsEnabled = true -+ doExceedsLimitsFn := func(_ context.Context, _ string, _ []KeyedStream) (*logproto.ExceedsLimitsResponse, error) { -+ return nil, errors.New(""failed to check limits"") -+ } -+ exceedsLimits, reasons, err := d.exceedsLimits(ctx, ""test"", streams, doExceedsLimitsFn) -+ require.EqualError(t, err, ""failed to check limits"") -+ require.False(t, exceedsLimits) -+ require.Nil(t, reasons) -+ }) -+ -+ t.Run(""stream exceeds limits"", func(t *testing.T) { -+ doExceedsLimitsFn := func(_ context.Context, _ string, _ []KeyedStream) (*logproto.ExceedsLimitsResponse, error) { -+ return &logproto.ExceedsLimitsResponse{ -+ Tenant: ""test"", -+ RejectedStreams: []*logproto.RejectedStream{{ -+ StreamHash: 1, -+ Reason: ""test"", -+ }}, -+ }, nil -+ } -+ exceedsLimits, reasons, err := d.exceedsLimits(ctx, ""test"", streams, doExceedsLimitsFn) -+ require.Nil(t, err) -+ require.True(t, exceedsLimits) -+ require.Equal(t, []string{""stream {foo=\""bar\""} was rejected because \""test\""""}, reasons) -+ }) -+ -+ t.Run(""stream does not exceed limits"", func(t *testing.T) { -+ doExceedsLimitsFn := func(_ context.Context, _ string, _ []KeyedStream) (*logproto.ExceedsLimitsResponse, error) { -+ return &logproto.ExceedsLimitsResponse{}, nil -+ } -+ exceedsLimits, reasons, err := d.exceedsLimits(ctx, ""test"", streams, doExceedsLimitsFn) -+ require.Nil(t, err) -+ require.False(t, exceedsLimits) -+ require.Nil(t, reasons) -+ }) -+}",chore,add tests for checking limits in distributor (#16741) -d188d06370ff0a721da55c72c4f885822ef27b0a,2024-12-30 10:05:47,renovate[bot],"fix(deps): update module github.com/sony/gobreaker/v2 to v2.1.0 (#15556) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 291b354f2a829..1471fa17a7f88 100644 ---- a/go.mod -+++ b/go.mod -@@ -90,7 +90,7 @@ require ( - github.com/segmentio/fasthash v1.0.3 - github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c - github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 -- 
github.com/sony/gobreaker/v2 v2.0.0 -+ github.com/sony/gobreaker/v2 v2.1.0 - github.com/spf13/afero v1.11.0 - github.com/stretchr/testify v1.10.0 - github.com/uber/jaeger-client-go v2.30.0+incompatible -@@ -170,6 +170,7 @@ require ( - github.com/gabriel-vasile/mimetype v1.4.4 // indirect - github.com/go-ini/ini v1.67.0 // indirect - github.com/go-ole/go-ole v1.3.0 // indirect -+ github.com/go-redsync/redsync/v4 v4.13.0 // indirect - github.com/goccy/go-json v0.10.3 // indirect - github.com/gorilla/handlers v1.5.2 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect -diff --git a/go.sum b/go.sum -index 28173f66499aa..df7047edbada0 100644 ---- a/go.sum -+++ b/go.sum -@@ -460,6 +460,14 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn - github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= - github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= - github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= -+github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= -+github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -+github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= -+github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= -+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -+github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA= -+github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ= - github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= - github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= - github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -@@ -530,6 +538,8 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 - github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= - github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= - github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -+github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= -+github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= - github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= - github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= - github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= -@@ -1026,6 +1036,8 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= - github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= - github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= -+github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo= -+github.com/redis/rueidis v1.0.19/go.mod 
h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo= - github.com/richardartoul/molecule v1.0.0 h1:+LFA9cT7fn8KF39zy4dhOnwcOwRoqKiBkPqKqya+8+U= - github.com/richardartoul/molecule v1.0.0/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= - github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -@@ -1071,8 +1083,8 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k - github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= - github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= - github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= --github.com/sony/gobreaker/v2 v2.0.0 h1:23AaR4JQ65y4rz8JWMzgXw2gKOykZ/qfqYunll4OwJ4= --github.com/sony/gobreaker/v2 v2.0.0/go.mod h1:8JnRUz80DJ1/ne8M8v7nmTs2713i58nIt4s7XcGe/DI= -+github.com/sony/gobreaker/v2 v2.1.0 h1:av2BnjtRmVPWBvy5gSFPytm1J8BmN5AGhq875FfGKDM= -+github.com/sony/gobreaker/v2 v2.1.0/go.mod h1:dO3Q/nCzxZj6ICjH6J/gM0r4oAwBMVLY8YAQf+NTtUg= - github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= - github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= - github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -@@ -1106,6 +1118,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl - github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= - github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= - github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -+github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM= -+github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= - github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= - github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= - github.com/thanos-io/objstore v0.0.0-20241217111833-063ea3806b2f h1:9ODgWM6r0tS9RTRJji+CShr4of09tKscnaXNrRVxPog= -diff --git a/vendor/github.com/go-redsync/redsync/v4/.gitignore b/vendor/github.com/go-redsync/redsync/v4/.gitignore -new file mode 100644 -index 0000000000000..82895ad098f72 ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/.gitignore -@@ -0,0 +1,3 @@ -+/test.out -+dump.rdb -+.idea -diff --git a/vendor/github.com/go-redsync/redsync/v4/.gitlab-ci.yml b/vendor/github.com/go-redsync/redsync/v4/.gitlab-ci.yml -new file mode 100644 -index 0000000000000..8d1f87840ce9e ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/.gitlab-ci.yml -@@ -0,0 +1,50 @@ -+variables: -+ GOCACHE: $CI_PROJECT_DIR/.go-build -+ -+cache: -+ key: ""$CI_PROJECT_PATH $CI_BUILD_REF_NAME"" -+ paths: -+ - .go-build -+ -+image: registry.furqansoftware.net/cardboard/golang:1.21.5-bullseye-0 -+ -+stages: -+ - lint -+ - build -+ - test -+ -+lint: -+ stage: lint -+ script: -+ - go install honnef.co/go/tools/cmd/staticcheck@2023.1.5 -+ - staticcheck ./... -+ -+build: -+ stage: build -+ script: -+ - go build ./... 
-+ -+test:build: -+ stage: test -+ needs: -+ - build -+ script: -+ - make test.build -+ artifacts: -+ expire_in: 3 hours -+ paths: -+ - test.out/* -+ -+test:run: -+ image: redis:7.2.3 -+ stage: test -+ needs: -+ - test:build -+ dependencies: -+ - test:build -+ script: -+ - | -+ for f in test.out/*; do -+ chmod +x $f -+ ./$f -test.v -+ done -diff --git a/vendor/github.com/go-redsync/redsync/v4/.travis.yml b/vendor/github.com/go-redsync/redsync/v4/.travis.yml -new file mode 100644 -index 0000000000000..27aa18d4a76a7 ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/.travis.yml -@@ -0,0 +1,13 @@ -+--- -+language: go -+ -+go: -+ - ""1.15"" -+ - ""1.16"" -+ -+env: -+ global: -+ - GO111MODULE=on -+ -+services: -+ - redis-server -diff --git a/vendor/github.com/go-redsync/redsync/v4/LICENSE b/vendor/github.com/go-redsync/redsync/v4/LICENSE -new file mode 100644 -index 0000000000000..b22832f4b5e92 ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/LICENSE -@@ -0,0 +1,27 @@ -+Copyright (c) 2023, Mahmud Ridwan -+All rights reserved. -+ -+Redistribution and use in source and binary forms, with or without -+modification, are permitted provided that the following conditions are met: -+ -+* Redistributions of source code must retain the above copyright notice, this -+ list of conditions and the following disclaimer. -+ -+* Redistributions in binary form must reproduce the above copyright notice, -+ this list of conditions and the following disclaimer in the documentation -+ and/or other materials provided with the distribution. -+ -+* Neither the name of the Redsync nor the names of its -+ contributors may be used to endorse or promote products derived from -+ this software without specific prior written permission. -+ -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS"" -+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -diff --git a/vendor/github.com/go-redsync/redsync/v4/Makefile b/vendor/github.com/go-redsync/redsync/v4/Makefile -new file mode 100644 -index 0000000000000..2860de9c79f7d ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/Makefile -@@ -0,0 +1,24 @@ -+.PHONY: test -+test: -+ go test -race ./... -+ -+.PHONY: test.build -+test.build: -+ mkdir -p test.out/ -+ for f in $(shell go list ./...); do \ -+ go test -race -c -o test.out/`echo ""$$f"" | sha256sum | cut -c1-5`-`basename ""$$f""` $$f ; \ -+ done -+ -+.PHONY: test.run -+test.run: -+ for f in test.out/*; do \ -+ $$f ; \ -+ done -+ -+.PHONY: lint -+lint: -+ staticcheck ./... 
-+ -+.PHONY: lint.tools.install -+lint.tools.install: -+ go install honnef.co/go/tools/cmd/staticcheck@2023.1.5 -diff --git a/vendor/github.com/go-redsync/redsync/v4/README.md b/vendor/github.com/go-redsync/redsync/v4/README.md -new file mode 100644 -index 0000000000000..88ecd55b1640f ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/README.md -@@ -0,0 +1,90 @@ -+# Redsync -+ -+[![Go Reference](https://pkg.go.dev/badge/github.com/go-redsync/redsync/v4.svg)](https://pkg.go.dev/github.com/go-redsync/redsync/v4) [![Build Status](https://travis-ci.org/go-redsync/redsync.svg?branch=master)](https://travis-ci.org/go-redsync/redsync) -+ -+Redsync provides a Redis-based distributed mutual exclusion lock implementation for Go as described in [this post](http://redis.io/topics/distlock). A reference library (by [antirez](https://github.com/antirez)) for Ruby is available at [github.com/antirez/redlock-rb](https://github.com/antirez/redlock-rb). -+ -+## Installation -+ -+Install Redsync using the go get command: -+ -+ $ go get github.com/go-redsync/redsync/v4 -+ -+Two driver implementations will be installed; however, only the one used will be included in your project. -+ -+ * [Redigo](https://github.com/gomodule/redigo) -+ * [Go-redis](https://github.com/go-redis/redis) -+ -+See the [examples](examples) folder for usage of each driver. -+ -+## Documentation -+ -+- [Reference](https://godoc.org/github.com/go-redsync/redsync) -+ -+## Usage -+ -+Error handling is simplified to `panic` for shorter example. -+ -+```go -+package main -+ -+import ( -+ goredislib ""github.com/redis/go-redis/v9"" -+ ""github.com/go-redsync/redsync/v4"" -+ ""github.com/go-redsync/redsync/v4/redis/goredis/v9"" -+) -+ -+func main() { -+ // Create a pool with go-redis (or redigo) which is the pool redisync will -+ // use while communicating with Redis. This can also be any pool that -+ // implements the `redis.Pool` interface. -+ client := goredislib.NewClient(&goredislib.Options{ -+ Addr: ""localhost:6379"", -+ }) -+ pool := goredis.NewPool(client) // or, pool := redigo.NewPool(...) -+ -+ // Create an instance of redisync to be used to obtain a mutual exclusion -+ // lock. -+ rs := redsync.New(pool) -+ -+ // Obtain a new mutex by using the same name for all instances wanting the -+ // same lock. -+ mutexname := ""my-global-mutex"" -+ mutex := rs.NewMutex(mutexname) -+ -+ // Obtain a lock for our given mutex. After this is successful, no one else -+ // can obtain the same lock (the same mutex name) until we unlock it. -+ if err := mutex.Lock(); err != nil { -+ panic(err) -+ } -+ -+ // Do your work that requires the lock. -+ -+ // Release the lock so other processes or threads can obtain a lock. -+ if ok, err := mutex.Unlock(); !ok || err != nil { -+ panic(""unlock failed"") -+ } -+} -+``` -+ -+## Contributing -+ -+Contributions are welcome. -+ -+## License -+ -+Redsync is available under the [BSD (3-Clause) License](https://opensource.org/licenses/BSD-3-Clause). -+ -+## Disclaimer -+ -+This code implements an algorithm which is currently a proposal, it was not formally analyzed. Make sure to understand how it works before using it in production environments. -+ -+## Real World Uses -+ -+Below is a list of public, open source projects that use Redsync: -+ -+- [Sourcegraph](https://github.com/sourcegraph/sourcegraph): Universal code search and intelligence platform. Uses Redsync in an internal cache implementation. 
-+- [Open Match](https://github.com/googleforgames/open-match) by Google: Flexible, extensible, and scalable video game matchmaking. Uses Redsync with its state store implementation. -+- [Gocron](https://github.com/go-co-op/gocron) by go-co-op: gocron is a job distributed scheduling package which lets you run Go functions at pre-determined intervals using a simple, human-friendly syntax. Uses Redsync with its distributed job scheduler implementation. -+ -+If you are using Redsync in a project please send a pull request to add it to the list. -diff --git a/vendor/github.com/go-redsync/redsync/v4/doc.go b/vendor/github.com/go-redsync/redsync/v4/doc.go -new file mode 100644 -index 0000000000000..b215b6ea713f3 ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/doc.go -@@ -0,0 +1,4 @@ -+// Package redsync provides a Redis-based distributed mutual exclusion lock implementation as described in the post http://redis.io/topics/distlock. -+// -+// Values containing the types defined in this package should not be copied. -+package redsync -diff --git a/vendor/github.com/go-redsync/redsync/v4/error.go b/vendor/github.com/go-redsync/redsync/v4/error.go -new file mode 100644 -index 0000000000000..5234e17a734ec ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/error.go -@@ -0,0 +1,50 @@ -+package redsync -+ -+import ( -+ ""errors"" -+ ""fmt"" -+) -+ -+// ErrFailed is the error resulting if Redsync fails to acquire the lock after -+// exhausting all retries. -+var ErrFailed = errors.New(""redsync: failed to acquire lock"") -+ -+// ErrExtendFailed is the error resulting if Redsync fails to extend the -+// lock. -+var ErrExtendFailed = errors.New(""redsync: failed to extend lock"") -+ -+// ErrLockAlreadyExpired is the error resulting if trying to unlock the lock which already expired. -+var ErrLockAlreadyExpired = errors.New(""redsync: failed to unlock, lock was already expired"") -+ -+// ErrTaken happens when the lock is already taken in a quorum on nodes. -+type ErrTaken struct { -+ Nodes []int -+} -+ -+func (err ErrTaken) Error() string { -+ return fmt.Sprintf(""lock already taken, locked nodes: %v"", err.Nodes) -+} -+ -+// ErrNodeTaken is the error resulting if the lock is already taken in one of -+// the cluster's nodes -+type ErrNodeTaken struct { -+ Node int -+} -+ -+func (err ErrNodeTaken) Error() string { -+ return fmt.Sprintf(""node #%d: lock already taken"", err.Node) -+} -+ -+// A RedisError is an error communicating with one of the Redis nodes. -+type RedisError struct { -+ Node int -+ Err error -+} -+ -+func (e RedisError) Error() string { -+ return fmt.Sprintf(""node #%d: %s"", e.Node, e.Err) -+} -+ -+func (e RedisError) Unwrap() error { -+ return e.Err -+} -diff --git a/vendor/github.com/go-redsync/redsync/v4/mutex.go b/vendor/github.com/go-redsync/redsync/v4/mutex.go -new file mode 100644 -index 0000000000000..47dd24fd355d5 ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/mutex.go -@@ -0,0 +1,350 @@ -+package redsync -+ -+import ( -+ ""context"" -+ ""crypto/rand"" -+ ""encoding/base64"" -+ ""time"" -+ -+ ""github.com/go-redsync/redsync/v4/redis"" -+ ""github.com/hashicorp/go-multierror"" -+) -+ -+// A DelayFunc is used to decide the amount of time to wait between retries. -+type DelayFunc func(tries int) time.Duration -+ -+// A Mutex is a distributed mutual exclusion lock. 
-+type Mutex struct { -+ name string -+ expiry time.Duration -+ -+ tries int -+ delayFunc DelayFunc -+ -+ driftFactor float64 -+ timeoutFactor float64 -+ -+ quorum int -+ -+ genValueFunc func() (string, error) -+ value string -+ until time.Time -+ shuffle bool -+ failFast bool -+ setNXOnExtend bool -+ -+ pools []redis.Pool -+} -+ -+// Name returns mutex name (i.e. the Redis key). -+func (m *Mutex) Name() string { -+ return m.name -+} -+ -+// Value returns the current random value. The value will be empty until a lock is acquired (or WithValue option is used). -+func (m *Mutex) Value() string { -+ return m.value -+} -+ -+// Until returns the time of validity of acquired lock. The value will be zero value until a lock is acquired. -+func (m *Mutex) Until() time.Time { -+ return m.until -+} -+ -+// TryLock only attempts to lock m once and returns immediately regardless of success or failure without retrying. -+func (m *Mutex) TryLock() error { -+ return m.TryLockContext(context.Background()) -+} -+ -+// TryLockContext only attempts to lock m once and returns immediately regardless of success or failure without retrying. -+func (m *Mutex) TryLockContext(ctx context.Context) error { -+ return m.lockContext(ctx, 1) -+} -+ -+// Lock locks m. In case it returns an error on failure, you may retry to acquire the lock by calling this method again. -+func (m *Mutex) Lock() error { -+ return m.LockContext(context.Background()) -+} -+ -+// LockContext locks m. In case it returns an error on failure, you may retry to acquire the lock by calling this method again. -+func (m *Mutex) LockContext(ctx context.Context) error { -+ return m.lockContext(ctx, m.tries) -+} -+ -+// lockContext locks m. In case it returns an error on failure, you may retry to acquire the lock by calling this method again. -+func (m *Mutex) lockContext(ctx context.Context, tries int) error { -+ if ctx == nil { -+ ctx = context.Background() -+ } -+ -+ value, err := m.genValueFunc() -+ if err != nil { -+ return err -+ } -+ -+ var timer *time.Timer -+ for i := 0; i < tries; i++ { -+ if i != 0 { -+ if timer == nil { -+ timer = time.NewTimer(m.delayFunc(i)) -+ } else { -+ timer.Reset(m.delayFunc(i)) -+ } -+ -+ select { -+ case <-ctx.Done(): -+ timer.Stop() -+ // Exit early if the context is done. -+ return ErrFailed -+ case <-timer.C: -+ // Fall-through when the delay timer completes. -+ } -+ } -+ -+ start := time.Now() -+ -+ n, err := func() (int, error) { -+ ctx, cancel := context.WithTimeout(ctx, time.Duration(int64(float64(m.expiry)*m.timeoutFactor))) -+ defer cancel() -+ return m.actOnPoolsAsync(func(pool redis.Pool) (bool, error) { -+ return m.acquire(ctx, pool, value) -+ }) -+ }() -+ -+ now := time.Now() -+ until := now.Add(m.expiry - now.Sub(start) - time.Duration(int64(float64(m.expiry)*m.driftFactor))) -+ if n >= m.quorum && now.Before(until) { -+ m.value = value -+ m.until = until -+ return nil -+ } -+ _, _ = func() (int, error) { -+ ctx, cancel := context.WithTimeout(ctx, time.Duration(int64(float64(m.expiry)*m.timeoutFactor))) -+ defer cancel() -+ return m.actOnPoolsAsync(func(pool redis.Pool) (bool, error) { -+ return m.release(ctx, pool, value) -+ }) -+ }() -+ if i == tries-1 && err != nil { -+ return err -+ } -+ } -+ -+ return ErrFailed -+} -+ -+// Unlock unlocks m and returns the status of unlock. -+func (m *Mutex) Unlock() (bool, error) { -+ return m.UnlockContext(context.Background()) -+} -+ -+// UnlockContext unlocks m and returns the status of unlock. 
-+func (m *Mutex) UnlockContext(ctx context.Context) (bool, error) { -+ n, err := m.actOnPoolsAsync(func(pool redis.Pool) (bool, error) { -+ return m.release(ctx, pool, m.value) -+ }) -+ if n < m.quorum { -+ return false, err -+ } -+ return true, nil -+} -+ -+// Extend resets the mutex's expiry and returns the status of expiry extension. -+func (m *Mutex) Extend() (bool, error) { -+ return m.ExtendContext(context.Background()) -+} -+ -+// ExtendContext resets the mutex's expiry and returns the status of expiry extension. -+func (m *Mutex) ExtendContext(ctx context.Context) (bool, error) { -+ start := time.Now() -+ n, err := m.actOnPoolsAsync(func(pool redis.Pool) (bool, error) { -+ return m.touch(ctx, pool, m.value, int(m.expiry/time.Millisecond)) -+ }) -+ if n < m.quorum { -+ return false, err -+ } -+ now := time.Now() -+ until := now.Add(m.expiry - now.Sub(start) - time.Duration(int64(float64(m.expiry)*m.driftFactor))) -+ if now.Before(until) { -+ m.until = until -+ return true, nil -+ } -+ return false, ErrExtendFailed -+} -+ -+// Valid returns true if the lock acquired through m is still valid. It may -+// also return true erroneously if quorum is achieved during the call and at -+// least one node then takes long enough to respond for the lock to expire. -+// -+// Deprecated: Use Until instead. See https://github.com/go-redsync/redsync/issues/72. -+func (m *Mutex) Valid() (bool, error) { -+ return m.ValidContext(context.Background()) -+} -+ -+// ValidContext returns true if the lock acquired through m is still valid. It may -+// also return true erroneously if quorum is achieved during the call and at -+// least one node then takes long enough to respond for the lock to expire. -+// -+// Deprecated: Use Until instead. See https://github.com/go-redsync/redsync/issues/72. 
-+func (m *Mutex) ValidContext(ctx context.Context) (bool, error) { -+ n, err := m.actOnPoolsAsync(func(pool redis.Pool) (bool, error) { -+ return m.valid(ctx, pool) -+ }) -+ return n >= m.quorum, err -+} -+ -+func (m *Mutex) valid(ctx context.Context, pool redis.Pool) (bool, error) { -+ if m.value == """" { -+ return false, nil -+ } -+ conn, err := pool.Get(ctx) -+ if err != nil { -+ return false, err -+ } -+ defer conn.Close() -+ reply, err := conn.Get(m.name) -+ if err != nil { -+ return false, err -+ } -+ return m.value == reply, nil -+} -+ -+func genValue() (string, error) { -+ b := make([]byte, 16) -+ _, err := rand.Read(b) -+ if err != nil { -+ return """", err -+ } -+ return base64.StdEncoding.EncodeToString(b), nil -+} -+ -+func (m *Mutex) acquire(ctx context.Context, pool redis.Pool, value string) (bool, error) { -+ conn, err := pool.Get(ctx) -+ if err != nil { -+ return false, err -+ } -+ defer conn.Close() -+ reply, err := conn.SetNX(m.name, value, m.expiry) -+ if err != nil { -+ return false, err -+ } -+ return reply, nil -+} -+ -+var deleteScript = redis.NewScript(1, ` -+ local val = redis.call(""GET"", KEYS[1]) -+ if val == ARGV[1] then -+ return redis.call(""DEL"", KEYS[1]) -+ elseif val == false then -+ return -1 -+ else -+ return 0 -+ end -+`) -+ -+func (m *Mutex) release(ctx context.Context, pool redis.Pool, value string) (bool, error) { -+ conn, err := pool.Get(ctx) -+ if err != nil { -+ return false, err -+ } -+ defer conn.Close() -+ status, err := conn.Eval(deleteScript, m.name, value) -+ if err != nil { -+ return false, err -+ } -+ if status == int64(-1) { -+ return false, ErrLockAlreadyExpired -+ } -+ return status != int64(0), nil -+} -+ -+var touchWithSetNXScript = redis.NewScript(1, ` -+ if redis.call(""GET"", KEYS[1]) == ARGV[1] then -+ return redis.call(""PEXPIRE"", KEYS[1], ARGV[2]) -+ elseif redis.call(""SET"", KEYS[1], ARGV[1], ""PX"", ARGV[2], ""NX"") then -+ return 1 -+ else -+ return 0 -+ end -+`) -+ -+var touchScript = redis.NewScript(1, ` -+ if redis.call(""GET"", KEYS[1]) == ARGV[1] then -+ return redis.call(""PEXPIRE"", KEYS[1], ARGV[2]) -+ else -+ return 0 -+ end -+`) -+ -+func (m *Mutex) touch(ctx context.Context, pool redis.Pool, value string, expiry int) (bool, error) { -+ conn, err := pool.Get(ctx) -+ if err != nil { -+ return false, err -+ } -+ defer conn.Close() -+ -+ touchScript := touchScript -+ if m.setNXOnExtend { -+ touchScript = touchWithSetNXScript -+ } -+ -+ status, err := conn.Eval(touchScript, m.name, value, expiry) -+ if err != nil { -+ return false, err -+ } -+ return status != int64(0), nil -+} -+ -+func (m *Mutex) actOnPoolsAsync(actFn func(redis.Pool) (bool, error)) (int, error) { -+ type result struct { -+ node int -+ statusOK bool -+ err error -+ } -+ -+ ch := make(chan result, len(m.pools)) -+ for node, pool := range m.pools { -+ go func(node int, pool redis.Pool) { -+ r := result{node: node} -+ r.statusOK, r.err = actFn(pool) -+ ch <- r -+ }(node, pool) -+ } -+ -+ var ( -+ n = 0 -+ taken []int -+ err error -+ ) -+ -+ for range m.pools { -+ r := <-ch -+ if r.statusOK { -+ n++ -+ } else if r.err == ErrLockAlreadyExpired { -+ err = multierror.Append(err, ErrLockAlreadyExpired) -+ } else if r.err != nil { -+ err = multierror.Append(err, &RedisError{Node: r.node, Err: r.err}) -+ } else { -+ taken = append(taken, r.node) -+ err = multierror.Append(err, &ErrNodeTaken{Node: r.node}) -+ } -+ -+ if m.failFast { -+ // fast retrun -+ if n >= m.quorum { -+ return n, err -+ } -+ -+ // fail fast -+ if len(taken) >= m.quorum { -+ return n, 
&ErrTaken{Nodes: taken} -+ } -+ } -+ } -+ -+ if len(taken) >= m.quorum { -+ return n, &ErrTaken{Nodes: taken} -+ } -+ return n, err -+} -diff --git a/vendor/github.com/go-redsync/redsync/v4/redis/goredis/v9/goredis.go b/vendor/github.com/go-redsync/redsync/v4/redis/goredis/v9/goredis.go -new file mode 100644 -index 0000000000000..1ab59278f1b7f ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/redis/goredis/v9/goredis.go -@@ -0,0 +1,79 @@ -+package goredis -+ -+import ( -+ ""context"" -+ ""strings"" -+ ""time"" -+ -+ redsyncredis ""github.com/go-redsync/redsync/v4/redis"" -+ ""github.com/redis/go-redis/v9"" -+) -+ -+type pool struct { -+ delegate redis.UniversalClient -+} -+ -+func (p *pool) Get(ctx context.Context) (redsyncredis.Conn, error) { -+ if ctx == nil { -+ ctx = context.Background() -+ } -+ return &conn{p.delegate, ctx}, nil -+} -+ -+// NewPool returns a Goredis-based pool implementation. -+func NewPool(delegate redis.UniversalClient) redsyncredis.Pool { -+ return &pool{delegate} -+} -+ -+type conn struct { -+ delegate redis.UniversalClient -+ ctx context.Context -+} -+ -+func (c *conn) Get(name string) (string, error) { -+ value, err := c.delegate.Get(c.ctx, name).Result() -+ return value, noErrNil(err) -+} -+ -+func (c *conn) Set(name string, value string) (bool, error) { -+ reply, err := c.delegate.Set(c.ctx, name, value, 0).Result() -+ return reply == ""OK"", err -+} -+ -+func (c *conn) SetNX(name string, value string, expiry time.Duration) (bool, error) { -+ return c.delegate.SetNX(c.ctx, name, value, expiry).Result() -+} -+ -+func (c *conn) PTTL(name string) (time.Duration, error) { -+ return c.delegate.PTTL(c.ctx, name).Result() -+} -+ -+func (c *conn) Eval(script *redsyncredis.Script, keysAndArgs ...interface{}) (interface{}, error) { -+ keys := make([]string, script.KeyCount) -+ args := keysAndArgs -+ -+ if script.KeyCount > 0 { -+ for i := 0; i < script.KeyCount; i++ { -+ keys[i] = keysAndArgs[i].(string) -+ } -+ args = keysAndArgs[script.KeyCount:] -+ } -+ -+ v, err := c.delegate.EvalSha(c.ctx, script.Hash, keys, args...).Result() -+ if err != nil && strings.Contains(err.Error(), ""NOSCRIPT "") { -+ v, err = c.delegate.Eval(c.ctx, script.Src, keys, args...).Result() -+ } -+ return v, noErrNil(err) -+} -+ -+func (c *conn) Close() error { -+ // Not needed for this library -+ return nil -+} -+ -+func noErrNil(err error) error { -+ if err == redis.Nil { -+ return nil -+ } -+ return err -+} -diff --git a/vendor/github.com/go-redsync/redsync/v4/redis/redis.go b/vendor/github.com/go-redsync/redsync/v4/redis/redis.go -new file mode 100644 -index 0000000000000..145fd766f79d4 ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/redis/redis.go -@@ -0,0 +1,44 @@ -+package redis -+ -+import ( -+ ""context"" -+ ""crypto/sha1"" -+ ""encoding/hex"" -+ ""io"" -+ ""time"" -+) -+ -+// Pool maintains a pool of Redis connections. -+type Pool interface { -+ Get(ctx context.Context) (Conn, error) -+} -+ -+// Conn is a single Redis connection. -+type Conn interface { -+ Get(name string) (string, error) -+ Set(name string, value string) (bool, error) -+ SetNX(name string, value string, expiry time.Duration) (bool, error) -+ Eval(script *Script, keysAndArgs ...interface{}) (interface{}, error) -+ PTTL(name string) (time.Duration, error) -+ Close() error -+} -+ -+// Script encapsulates the source, hash and key count for a Lua script. 
-+// Taken from https://github.com/gomodule/redigo/blob/46992b0f02f74066bcdfd9b03e33bc03abd10dc7/redis/script.go#L24-L30 -+type Script struct { -+ KeyCount int -+ Src string -+ Hash string -+} -+ -+// NewScript returns a new script object. If keyCount is greater than or equal -+// to zero, then the count is automatically inserted in the EVAL command -+// argument list. If keyCount is less than zero, then the application supplies -+// the count as the first value in the keysAndArgs argument to the Do, Send and -+// SendHash methods. -+// Taken from https://github.com/gomodule/redigo/blob/46992b0f02f74066bcdfd9b03e33bc03abd10dc7/redis/script.go#L32-L41 -+func NewScript(keyCount int, src string) *Script { -+ h := sha1.New() -+ _, _ = io.WriteString(h, src) -+ return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} -+} -diff --git a/vendor/github.com/go-redsync/redsync/v4/redsync.go b/vendor/github.com/go-redsync/redsync/v4/redsync.go -new file mode 100644 -index 0000000000000..edffa3f8a56b1 ---- /dev/null -+++ b/vendor/github.com/go-redsync/redsync/v4/redsync.go -@@ -0,0 +1,160 @@ -+package redsync -+ -+import ( -+ ""math/rand"" -+ ""time"" -+ -+ ""github.com/go-redsync/redsync/v4/redis"" -+) -+ -+const ( -+ minRetryDelayMilliSec = 50 -+ maxRetryDelayMilliSec = 250 -+) -+ -+// Redsync provides a simple method for creating distributed mutexes using multiple Redis connection pools. -+type Redsync struct { -+ pools []redis.Pool -+} -+ -+// New creates and returns a new Redsync instance from given Redis connection pools. -+func New(pools ...redis.Pool) *Redsync { -+ return &Redsync{ -+ pools: pools, -+ } -+} -+ -+// NewMutex returns a new distributed mutex with given name. -+func (r *Redsync) NewMutex(name string, options ...Option) *Mutex { -+ m := &Mutex{ -+ name: name, -+ expiry: 8 * time.Second, -+ tries: 32, -+ delayFunc: func(tries int) time.Duration { -+ return time.Duration(rand.Intn(maxRetryDelayMilliSec-minRetryDelayMilliSec)+minRetryDelayMilliSec) * time.Millisecond -+ }, -+ genValueFunc: genValue, -+ driftFactor: 0.01, -+ timeoutFactor: 0.05, -+ quorum: len(r.pools)/2 + 1, -+ pools: r.pools, -+ } -+ for _, o := range options { -+ o.Apply(m) -+ } -+ if m.shuffle { -+ randomPools(m.pools) -+ } -+ return m -+} -+ -+// An Option configures a mutex. -+type Option interface { -+ Apply(*Mutex) -+} -+ -+// OptionFunc is a function that configures a mutex. -+type OptionFunc func(*Mutex) -+ -+// Apply calls f(mutex) -+func (f OptionFunc) Apply(mutex *Mutex) { -+ f(mutex) -+} -+ -+// WithExpiry can be used to set the expiry of a mutex to the given value. -+// The default is 8s. -+func WithExpiry(expiry time.Duration) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.expiry = expiry -+ }) -+} -+ -+// WithTries can be used to set the number of times lock acquire is attempted. -+// The default value is 32. -+func WithTries(tries int) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.tries = tries -+ }) -+} -+ -+// WithRetryDelay can be used to set the amount of time to wait between retries. -+// The default value is rand(50ms, 250ms). 
-+func WithRetryDelay(delay time.Duration) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.delayFunc = func(tries int) time.Duration { -+ return delay -+ } -+ }) -+} -+ -+// WithSetNXOnExtend improves extending logic to extend the key if exist -+// and if not, tries to set a new key in redis -+// Useful if your redises restart often and you want to reduce the chances of losing the lock -+// Read this MR for more info: https://github.com/go-redsync/redsync/pull/149 -+func WithSetNXOnExtend() Option { -+ return OptionFunc(func(m *Mutex) { -+ m.setNXOnExtend = true -+ }) -+} -+ -+// WithRetryDelayFunc can be used to override default delay behavior. -+func WithRetryDelayFunc(delayFunc DelayFunc) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.delayFunc = delayFunc -+ }) -+} -+ -+// WithDriftFactor can be used to set the clock drift factor. -+// The default value is 0.01. -+func WithDriftFactor(factor float64) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.driftFactor = factor -+ }) -+} -+ -+// WithTimeoutFactor can be used to set the timeout factor. -+// The default value is 0.05. -+func WithTimeoutFactor(factor float64) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.timeoutFactor = factor -+ }) -+} -+ -+// WithGenValueFunc can be used to set the custom value generator. -+func WithGenValueFunc(genValueFunc func() (string, error)) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.genValueFunc = genValueFunc -+ }) -+} -+ -+// WithValue can be used to assign the random value without having to call lock. -+// This allows the ownership of a lock to be ""transferred"" and allows the lock to be unlocked from elsewhere. -+func WithValue(v string) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.value = v -+ }) -+} -+ -+// WithFailFast can be used to quickly acquire and release the lock. -+// When some Redis servers are blocking, we do not need to wait for responses from all the Redis servers response. -+// As long as the quorum is met, we can assume the lock is acquired. The effect of this parameter is to achieve low -+// latency, avoid Redis blocking causing Lock/Unlock to not return for a long time. -+func WithFailFast(b bool) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.failFast = b -+ }) -+} -+ -+// WithShufflePools can be used to shuffle Redis pools to reduce centralized access in concurrent scenarios. -+func WithShufflePools(b bool) Option { -+ return OptionFunc(func(m *Mutex) { -+ m.shuffle = b -+ }) -+} -+ -+// randomPools shuffles Redis pools. -+func randomPools(pools []redis.Pool) { -+ rand.Shuffle(len(pools), func(i, j int) { -+ pools[i], pools[j] = pools[j], pools[i] -+ }) -+} -diff --git a/vendor/github.com/sony/gobreaker/v2/distributed_gobreaker.go b/vendor/github.com/sony/gobreaker/v2/distributed_gobreaker.go -new file mode 100644 -index 0000000000000..cc530c9e1d157 ---- /dev/null -+++ b/vendor/github.com/sony/gobreaker/v2/distributed_gobreaker.go -@@ -0,0 +1,216 @@ -+package gobreaker -+ -+import ( -+ ""encoding/json"" -+ ""errors"" -+ ""time"" -+) -+ -+var ( -+ // ErrNoSharedStore is returned when there is no shared store. -+ ErrNoSharedStore = errors.New(""no shared store"") -+ // ErrNoSharedState is returned when there is no shared state. -+ ErrNoSharedState = errors.New(""no shared state"") -+) -+ -+// SharedState represents the shared state of DistributedCircuitBreaker. 
-+type SharedState struct { -+ State State `json:""state""` -+ Generation uint64 `json:""generation""` -+ Counts Counts `json:""counts""` -+ Expiry time.Time `json:""expiry""` -+} -+ -+// SharedDataStore stores the shared state of DistributedCircuitBreaker. -+type SharedDataStore interface { -+ Lock(name string) error -+ Unlock(name string) error -+ GetData(name string) ([]byte, error) -+ SetData(name string, data []byte) error -+} -+ -+// DistributedCircuitBreaker extends CircuitBreaker with SharedDataStore. -+type DistributedCircuitBreaker[T any] struct { -+ *CircuitBreaker[T] -+ store SharedDataStore -+} -+ -+// NewDistributedCircuitBreaker returns a new DistributedCircuitBreaker. -+func NewDistributedCircuitBreaker[T any](store SharedDataStore, settings Settings) (dcb *DistributedCircuitBreaker[T], err error) { -+ if store == nil { -+ return nil, ErrNoSharedStore -+ } -+ -+ dcb = &DistributedCircuitBreaker[T]{ -+ CircuitBreaker: NewCircuitBreaker[T](settings), -+ store: store, -+ } -+ -+ err = dcb.lock() -+ if err != nil { -+ return nil, err -+ } -+ defer func() { -+ e := dcb.unlock() -+ if err == nil { -+ err = e -+ } -+ }() -+ -+ _, err = dcb.getSharedState() -+ if err == ErrNoSharedState { -+ err = dcb.setSharedState(dcb.extract()) -+ } -+ if err != nil { -+ return nil, err -+ } -+ -+ return dcb, nil -+} -+ -+const ( -+ mutexTimeout = 5 * time.Second -+ mutexWaitTime = 500 * time.Millisecond -+) -+ -+func (dcb *DistributedCircuitBreaker[T]) mutexKey() string { -+ return ""gobreaker:mutex:"" + dcb.name -+} -+ -+func (dcb *DistributedCircuitBreaker[T]) lock() error { -+ if dcb.store == nil { -+ return ErrNoSharedStore -+ } -+ -+ var err error -+ expiry := time.Now().Add(mutexTimeout) -+ for time.Now().Before(expiry) { -+ err = dcb.store.Lock(dcb.mutexKey()) -+ if err == nil { -+ return nil -+ } -+ -+ time.Sleep(mutexWaitTime) -+ } -+ return err -+} -+ -+func (dcb *DistributedCircuitBreaker[T]) unlock() error { -+ if dcb.store == nil { -+ return ErrNoSharedStore -+ } -+ -+ return dcb.store.Unlock(dcb.mutexKey()) -+} -+ -+func (dcb *DistributedCircuitBreaker[T]) sharedStateKey() string { -+ return ""gobreaker:state:"" + dcb.name -+} -+ -+func (dcb *DistributedCircuitBreaker[T]) getSharedState() (SharedState, error) { -+ var state SharedState -+ if dcb.store == nil { -+ return state, ErrNoSharedStore -+ } -+ -+ data, err := dcb.store.GetData(dcb.sharedStateKey()) -+ if len(data) == 0 { -+ return state, ErrNoSharedState -+ } else if err != nil { -+ return state, err -+ } -+ -+ err = json.Unmarshal(data, &state) -+ return state, err -+} -+ -+func (dcb *DistributedCircuitBreaker[T]) setSharedState(state SharedState) error { -+ if dcb.store == nil { -+ return ErrNoSharedStore -+ } -+ -+ data, err := json.Marshal(state) -+ if err != nil { -+ return err -+ } -+ -+ return dcb.store.SetData(dcb.sharedStateKey(), data) -+} -+ -+func (dcb *DistributedCircuitBreaker[T]) inject(shared SharedState) { -+ dcb.mutex.Lock() -+ defer dcb.mutex.Unlock() -+ -+ dcb.state = shared.State -+ dcb.generation = shared.Generation -+ dcb.counts = shared.Counts -+ dcb.expiry = shared.Expiry -+} -+ -+func (dcb *DistributedCircuitBreaker[T]) extract() SharedState { -+ dcb.mutex.Lock() -+ defer dcb.mutex.Unlock() -+ -+ return SharedState{ -+ State: dcb.state, -+ Generation: dcb.generation, -+ Counts: dcb.counts, -+ Expiry: dcb.expiry, -+ } -+} -+ -+// State returns the State of DistributedCircuitBreaker. 
-+func (dcb *DistributedCircuitBreaker[T]) State() (state State, err error) { -+ shared, err := dcb.getSharedState() -+ if err != nil { -+ return shared.State, err -+ } -+ -+ err = dcb.lock() -+ if err != nil { -+ return state, err -+ } -+ defer func() { -+ e := dcb.unlock() -+ if err == nil { -+ err = e -+ } -+ }() -+ -+ dcb.inject(shared) -+ state = dcb.CircuitBreaker.State() -+ shared = dcb.extract() -+ -+ err = dcb.setSharedState(shared) -+ return state, err -+} -+ -+// Execute runs the given request if the DistributedCircuitBreaker accepts it. -+func (dcb *DistributedCircuitBreaker[T]) Execute(req func() (T, error)) (t T, err error) { -+ shared, err := dcb.getSharedState() -+ if err != nil { -+ return t, err -+ } -+ -+ err = dcb.lock() -+ if err != nil { -+ return t, err -+ } -+ defer func() { -+ e := dcb.unlock() -+ if err == nil { -+ err = e -+ } -+ }() -+ -+ dcb.inject(shared) -+ t, err = dcb.CircuitBreaker.Execute(req) -+ shared = dcb.extract() -+ -+ e := dcb.setSharedState(shared) -+ if e != nil { -+ return t, e -+ } -+ -+ return t, err -+} -diff --git a/vendor/github.com/sony/gobreaker/v2/redis_store.go b/vendor/github.com/sony/gobreaker/v2/redis_store.go -new file mode 100644 -index 0000000000000..0f70f3a85dd88 ---- /dev/null -+++ b/vendor/github.com/sony/gobreaker/v2/redis_store.go -@@ -0,0 +1,64 @@ -+package gobreaker -+ -+import ( -+ ""context"" -+ ""errors"" -+ -+ ""github.com/go-redsync/redsync/v4"" -+ ""github.com/go-redsync/redsync/v4/redis/goredis/v9"" -+ ""github.com/redis/go-redis/v9"" -+) -+ -+type RedisStore struct { -+ ctx context.Context -+ client *redis.Client -+ rs *redsync.Redsync -+ mutex map[string]*redsync.Mutex -+} -+ -+func NewRedisStore(addr string) *RedisStore { -+ client := redis.NewClient(&redis.Options{ -+ Addr: addr, -+ }) -+ return &RedisStore{ -+ ctx: context.Background(), -+ client: client, -+ rs: redsync.New(goredis.NewPool(client)), -+ mutex: map[string]*redsync.Mutex{}, -+ } -+} -+ -+func (rs *RedisStore) Lock(name string) error { -+ mutex, ok := rs.mutex[name] -+ if ok { -+ return mutex.Lock() -+ } -+ -+ mutex = rs.rs.NewMutex(name, redsync.WithExpiry(mutexTimeout)) -+ rs.mutex[name] = mutex -+ return mutex.Lock() -+} -+ -+func (rs *RedisStore) Unlock(name string) error { -+ mutex, ok := rs.mutex[name] -+ if ok { -+ var err error -+ ok, err = mutex.Unlock() -+ if ok && err == nil { -+ return nil -+ } -+ } -+ return errors.New(""unlock failed"") -+} -+ -+func (rs *RedisStore) GetData(name string) ([]byte, error) { -+ return rs.client.Get(rs.ctx, name).Bytes() -+} -+ -+func (rs *RedisStore) SetData(name string, data []byte) error { -+ return rs.client.Set(rs.ctx, name, data, 0).Err() -+} -+ -+func (rs *RedisStore) Close() { -+ rs.client.Close() -+} -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 43ab0c3acc211..87299bd368e80 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -831,6 +831,11 @@ github.com/go-playground/universal-translator - # github.com/go-playground/validator/v10 v10.19.0 - ## explicit; go 1.18 - github.com/go-playground/validator/v10 -+# github.com/go-redsync/redsync/v4 v4.13.0 -+## explicit; go 1.22 -+github.com/go-redsync/redsync/v4 -+github.com/go-redsync/redsync/v4/redis -+github.com/go-redsync/redsync/v4/redis/goredis/v9 - # github.com/go-zookeeper/zk v1.0.3 - ## explicit; go 1.13 - github.com/go-zookeeper/zk -@@ -1600,8 +1605,8 @@ github.com/shurcooL/vfsgen - # github.com/sirupsen/logrus v1.9.3 - ## explicit; go 1.13 - github.com/sirupsen/logrus --# github.com/sony/gobreaker/v2 v2.0.0 --## 
explicit; go 1.21 -+# github.com/sony/gobreaker/v2 v2.1.0 -+## explicit; go 1.22.0 - github.com/sony/gobreaker/v2 - # github.com/spaolacci/murmur3 v1.1.0 - ## explicit",fix,"update module github.com/sony/gobreaker/v2 to v2.1.0 (#15556) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -a253fa3102d75560d9f96949d91362dd3f494bce,2025-03-05 04:32:36,J Stickler,"docs: clarify OTEL endpoint (#16289) - -Signed-off-by: J Stickler ",False,"diff --git a/docs/sources/release-notes/v3-3.md b/docs/sources/release-notes/v3-3.md -index d82b2abc040fd..ddd55a3add2a8 100644 ---- a/docs/sources/release-notes/v3-3.md -+++ b/docs/sources/release-notes/v3-3.md -@@ -198,7 +198,7 @@ For important upgrade guidance, refer to the [Upgrade Guide](https://grafana.com - - **mixins:** Add backend path section in loki-operational for single scalable deployment ([#13023](https://github.com/grafana/loki/issues/13023)) ([16881ab](https://github.com/grafana/loki/commit/16881ab0d3b9e9e6bfc37f22ff69f5f1019a0df1)). - - **mixins:** Disk space utilization panels with latest KSM versions ([#13486](https://github.com/grafana/loki/issues/13486)) ([0ea7431](https://github.com/grafana/loki/commit/0ea7431139ae0a18ef4e90bed836a7a6b92ab890)). - - **mixins:** Retention dashboards fix metric name ([#14617](https://github.com/grafana/loki/issues/14617)) ([c762b9b](https://github.com/grafana/loki/commit/c762b9b5d3877e7cbfc41d8ab9a1a4287ebe97b2)). --- **oltp:** Enable service detection for otlp endpoint ([#14036](https://github.com/grafana/loki/issues/14036)) ([4f962ef](https://github.com/grafana/loki/commit/4f962ef7af250fc347dbed15583787d0238f6e9f)). -+- **otlp:** Enable service detection for otlp endpoint ([#14036](https://github.com/grafana/loki/issues/14036)) ([4f962ef](https://github.com/grafana/loki/commit/4f962ef7af250fc347dbed15583787d0238f6e9f)). - - **operator:** Add 1x.pico OpenShift UI dropdown menu ([#14660](https://github.com/grafana/loki/issues/14660)) ([4687f37](https://github.com/grafana/loki/commit/4687f377db0a7ae07ffdea354582c882c10b72c4)). - - **operator:** Add missing groupBy label for all rules on OpenShift ([#14279](https://github.com/grafana/loki/issues/14279)) ([ce7b2e8](https://github.com/grafana/loki/commit/ce7b2e89d9470e4e6a61a94f2b51ff8b938b5a5e)). - - **operator:** Correctly ignore again BlotDB dashboards ([#14587](https://github.com/grafana/loki/issues/14587)) ([4879d10](https://github.com/grafana/loki/commit/4879d106bbeea29e331ddb7c9a49274600190032)). 
-diff --git a/docs/sources/send-data/alloy/examples/alloy-kafka-logs.md b/docs/sources/send-data/alloy/examples/alloy-kafka-logs.md -index e361aef8af46d..9ee83f937a0c5 100644 ---- a/docs/sources/send-data/alloy/examples/alloy-kafka-logs.md -+++ b/docs/sources/send-data/alloy/examples/alloy-kafka-logs.md -@@ -195,7 +195,7 @@ And finally, add the following configuration to the `config.alloy` file: - ```alloy - loki.write ""http"" { - endpoint { -- url = ""http://loki:3100/loki/api/v1/push"" -+ url = ""http://:3100/otlp/v1/logs"" - } - } - ``` -@@ -299,7 +299,7 @@ Finally, add the following configuration to the `config.alloy` file: - ```alloy - otelcol.exporter.otlphttp ""default"" { - client { -- endpoint = ""http://loki:3100/otlp"" -+ endpoint = ""http://:3100/otlp/v1/logs"" - } - } - ``` -diff --git a/docs/sources/send-data/alloy/examples/alloy-otel-logs.md b/docs/sources/send-data/alloy/examples/alloy-otel-logs.md -index ee1cf8aa66690..296c9b64e778d 100644 ---- a/docs/sources/send-data/alloy/examples/alloy-otel-logs.md -+++ b/docs/sources/send-data/alloy/examples/alloy-otel-logs.md -@@ -181,7 +181,7 @@ Now add the following configuration to the `config.alloy` file: - ```alloy - otelcol.exporter.otlphttp ""default"" { - client { -- endpoint = ""http://loki:3100/otlp"" -+ endpoint = ""http://:3100/otlp/v1/logs"" - } - } - ```",docs,"clarify OTEL endpoint (#16289) - -Signed-off-by: J Stickler " -2af3ca077e2c70110cda2c789fbcde8b90ab8048,2022-02-03 23:09:26,Owen Diehl,"Logql/parallel binop (#5317) - -* adds justification for keeping Downstreamer parallelism - -* loads binop legs in parallel - -* increases downstreamer default concurrency - -* astmapper spanlogger - -* always clone expr during mapping to prevent mutability bugs - -* Revert ""astmapper spanlogger"" - -This reverts commit 23f6b55c14faccaf659bf083995d7c39ed45c379. 
- -* cleanup + use errgroup",False,"diff --git a/pkg/logql/ast.go b/pkg/logql/ast.go -index 07aba11efd444..a02844a7e6744 100644 ---- a/pkg/logql/ast.go -+++ b/pkg/logql/ast.go -@@ -26,6 +26,10 @@ type Expr interface { - fmt.Stringer - } - -+func Clone(e Expr) (Expr, error) { -+ return ParseExpr(e.String()) -+} -+ - type QueryParams interface { - LogSelector() (LogSelectorExpr, error) - GetStart() time.Time -diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go -index e9fbcfc1f29f0..bf0a5fa8f9c82 100644 ---- a/pkg/logql/evaluator.go -+++ b/pkg/logql/evaluator.go -@@ -11,6 +11,7 @@ import ( - ""github.com/pkg/errors"" - ""github.com/prometheus/prometheus/model/labels"" - ""github.com/prometheus/prometheus/promql"" -+ ""golang.org/x/sync/errgroup"" - - ""github.com/grafana/loki/pkg/iter"" - ""github.com/grafana/loki/pkg/logproto"" -@@ -547,16 +548,31 @@ func binOpStepEvaluator( - ) - } - -- // we have two non literal legs -- lse, err := ev.StepEvaluator(ctx, ev, expr.SampleExpr, q) -- if err != nil { -- return nil, err -- } -- rse, err := ev.StepEvaluator(ctx, ev, expr.RHS, q) -- if err != nil { -+ var lse, rse StepEvaluator -+ g, ctx := errgroup.WithContext(ctx) -+ -+ // We have two non literal legs, -+ // load them in parallel -+ g.Go(func() error { -+ var err error -+ lse, err = ev.StepEvaluator(ctx, ev, expr.SampleExpr, q) -+ return err -+ }) -+ g.Go(func() error { -+ var err error -+ rse, err = ev.StepEvaluator(ctx, ev, expr.RHS, q) -+ return err -+ }) -+ -+ // ensure both sides are loaded before returning the combined evaluator -+ if err := g.Wait(); err != nil { - return nil, err - } - -+ // keep a scoped reference to err as it's referenced in the Error() -+ // implementation of this StepEvaluator -+ var scopedErr error -+ - return newStepEvaluator(func() (bool, int64, promql.Vector) { - var ( - ts int64 -@@ -593,7 +609,7 @@ func binOpStepEvaluator( - case OpTypeUnless: - results = vectorUnless(lhs, rhs, lsigs, rsigs) - default: -- results, err = vectorBinop(expr.Op, expr.Opts, lhs, rhs, lsigs, rsigs) -+ results, scopedErr = vectorBinop(expr.Op, expr.Opts, lhs, rhs, lsigs, rsigs) - } - return true, ts, results - }, func() (lastError error) { -@@ -605,8 +621,8 @@ func binOpStepEvaluator( - return lastError - }, func() error { - var errs []error -- if err != nil { -- errs = append(errs, err) -+ if scopedErr != nil { -+ errs = append(errs, scopedErr) - } - for _, ev := range []StepEvaluator{lse, rse} { - if err := ev.Error(); err != nil { -diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go -index f7fdbfec3892f..d6a1f93f70f5a 100644 ---- a/pkg/logql/shardmapper.go -+++ b/pkg/logql/shardmapper.go -@@ -111,8 +111,8 @@ func (m ShardMapper) Parse(query string) (noop bool, expr Expr, err error) { - return false, nil, err - } - -- mappedStr := mapped.String() - originalStr := parsed.String() -+ mappedStr := mapped.String() - noop = originalStr == mappedStr - if noop { - m.metrics.parsed.WithLabelValues(NoopKey).Inc() -@@ -126,6 +126,12 @@ func (m ShardMapper) Parse(query string) (noop bool, expr Expr, err error) { - } - - func (m ShardMapper) Map(expr Expr, r *shardRecorder) (Expr, error) { -+ // immediately clone the passed expr to avoid mutating the original -+ expr, err := Clone(expr) -+ if err != nil { -+ return nil, err -+ } -+ - switch e := expr.(type) { - case *LiteralExpr: - return e, nil -diff --git a/pkg/querier/queryrange/downstreamer.go b/pkg/querier/queryrange/downstreamer.go -index 976e878f2ffc1..f4ae9e756d8fd 100644 ---- 
a/pkg/querier/queryrange/downstreamer.go -+++ b/pkg/querier/queryrange/downstreamer.go -@@ -18,7 +18,7 @@ import ( - ) - - const ( -- DefaultDownstreamConcurrency = 32 -+ DefaultDownstreamConcurrency = 128 - ) - - type DownstreamHandler struct { -@@ -48,6 +48,12 @@ func ParamsToLokiRequest(params logql.Params, shards logql.Shards) queryrangebas - } - } - -+// Note: After the introduction of the LimitedRoundTripper, -+// bounding concurrency in the downstreamer is mostly redundant -+// The reason we don't remove it is to prevent malicious queries -+// from creating an unreasonably large number of goroutines, such as -+// the case of a query like `a / a / a / a / a ..etc`, which could try -+// to shard each leg, quickly dispatching an unreasonable number of goroutines. - func (h DownstreamHandler) Downstreamer() logql.Downstreamer { - p := DefaultDownstreamConcurrency - locks := make(chan struct{}, p)",unknown,"Logql/parallel binop (#5317) - -* adds justification for keeping Downstreamer parallelism - -* loads binop legs in parallel - -* increases downstreamer default concurrency - -* astmapper spanlogger - -* always clone expr during mapping to prevent mutability bugs - -* Revert ""astmapper spanlogger"" - -This reverts commit 23f6b55c14faccaf659bf083995d7c39ed45c379. - -* cleanup + use errgroup" -328ad2363454eb519f3fdede2b4958e84647d7ad,2019-12-02 19:32:14,Daniel González,"Fix duration format (#1343) - -Signed-off-by: Daniel González Lopes ",False,"diff --git a/docs/configuration/README.md b/docs/configuration/README.md -index 3ce31f652d94b..3d7cf67a7325b 100644 ---- a/docs/configuration/README.md -+++ b/docs/configuration/README.md -@@ -36,7 +36,7 @@ Generic placeholders are defined as follows: - - * ``: a boolean that can take the values `true` or `false` - * ``: any integer matching the regular expression `[1-9]+[0-9]*` --* ``: a duration matching the regular expression `[0-9]+(ms|[smhdwy])` -+* ``: a duration matching the regular expression `[0-9]+(ns|us|µs|ms|[smh])` - * ``: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*` - * ``: a string of unicode characters - * ``: a valid path relative to current working directory or an",unknown,"Fix duration format (#1343) - -Signed-off-by: Daniel González Lopes " -a4b897482b319970fe1ea5c6133be66454881ff9,2021-09-08 14:34:15,Kaviraj,"Add custom UnmarshalJSON for bytesize type (#4289) - -* Add custom UnmarshalJSON for bytesize type - -* Add more test case - -* Fix limits test",False,"diff --git a/pkg/util/flagext/bytesize.go b/pkg/util/flagext/bytesize.go -index 76c61d69d413b..0d59a6aba2710 100644 ---- a/pkg/util/flagext/bytesize.go -+++ b/pkg/util/flagext/bytesize.go -@@ -1,6 +1,7 @@ - package flagext - - import ( -+ ""encoding/json"" - ""strings"" - - ""github.com/c2h5oh/datasize"" -@@ -44,3 +45,14 @@ func (bs *ByteSize) UnmarshalYAML(unmarshal func(interface{}) error) error { - - return bs.Set(str) - } -+ -+// UnmarshalJSON implements json.Unmarsal interface to work with JSON. 
-+func (bs *ByteSize) UnmarshalJSON(val []byte) error { -+ var str string -+ -+ if err := json.Unmarshal(val, &str); err != nil { -+ return err -+ } -+ -+ return bs.Set(str) -+} -diff --git a/pkg/util/flagext/bytesize_test.go b/pkg/util/flagext/bytesize_test.go -index 95331af97797d..363645f9c1bba 100644 ---- a/pkg/util/flagext/bytesize_test.go -+++ b/pkg/util/flagext/bytesize_test.go -@@ -3,6 +3,8 @@ package flagext - import ( - ""testing"" - -+ ""encoding/json"" -+ - ""github.com/stretchr/testify/require"" - ""gopkg.in/yaml.v2"" - ) -@@ -102,3 +104,38 @@ func Test_ByteSizeYAML(t *testing.T) { - }) - } - } -+ -+func Test_ByteSizeJSON(t *testing.T) { -+ for _, tc := range []struct { -+ in string -+ err bool -+ out ByteSize -+ }{ -+ { -+ in: `{ ""bytes"": ""256GB"" }`, -+ out: ByteSize(256 << 30), -+ }, -+ { -+ // JSON shouldn't allow to set integer as value for ByteSize field. -+ in: `{ ""bytes"": 2.62144e+07 }`, -+ err: true, -+ }, -+ { -+ in: `{ ""bytes"": ""abc"" }`, -+ err: true, -+ }, -+ } { -+ t.Run(tc.in, func(t *testing.T) { -+ var out struct { -+ Bytes ByteSize `json:""bytes""` -+ } -+ err := json.Unmarshal([]byte(tc.in), &out) -+ if tc.err { -+ require.NotNil(t, err) -+ } else { -+ require.Nil(t, err) -+ require.Equal(t, tc.out, out.Bytes) -+ } -+ }) -+ } -+} -diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go -index 9605bbd44f5ef..727f86b924133 100644 ---- a/pkg/validation/limits_test.go -+++ b/pkg/validation/limits_test.go -@@ -76,7 +76,7 @@ per_tenant_override_period: 230s - ""reject_old_samples_max_age"": ""40s"", - ""creation_grace_period"": ""50s"", - ""enforce_metric_name"": true, -- ""max_line_size"": 60, -+ ""max_line_size"": ""60"", - ""max_line_size_truncate"": true, - ""max_streams_per_user"": 70, - ""max_global_streams_per_user"": 80,",unknown,"Add custom UnmarshalJSON for bytesize type (#4289) - -* Add custom UnmarshalJSON for bytesize type - -* Add more test case - -* Fix limits test" -4419d0f33e9f4f6f9305d89dd6f2ca47e3a18d8c,2024-11-01 20:00:10,renovate[bot],"fix(deps): update module github.com/schollz/progressbar/v3 to v3.17.0 (#14720) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index bb64a5358c6d1..37e9113ef42c6 100644 ---- a/go.mod -+++ b/go.mod -@@ -137,7 +137,7 @@ require ( - github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/common/sigv4 v0.1.0 - github.com/richardartoul/molecule v1.0.0 -- github.com/schollz/progressbar/v3 v3.14.6 -+ github.com/schollz/progressbar/v3 v3.17.0 - github.com/shirou/gopsutil/v4 v4.24.10 - github.com/thanos-io/objstore v0.0.0-20241015070247-5f04b8b0b52a - github.com/twmb/franz-go v1.17.1 -diff --git a/go.sum b/go.sum -index 4acfdb9c7e48a..459090b348a3b 100644 ---- a/go.sum -+++ b/go.sum -@@ -442,6 +442,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL - github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= - github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= - github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -+github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= -+github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= - github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= - github.com/chromedp/chromedp v0.9.2/go.mod 
h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= - github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= -@@ -1303,7 +1305,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V - github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= - github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= - github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= --github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= - github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= - github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= - github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -@@ -1408,6 +1409,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D - github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= - github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= - github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= - github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= - github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= - github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= -@@ -1710,8 +1713,8 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh - github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= --github.com/schollz/progressbar/v3 v3.14.6 h1:GyjwcWBAf+GFDMLziwerKvpuS7ZF+mNTAXIB2aspiZs= --github.com/schollz/progressbar/v3 v3.14.6/go.mod h1:Nrzpuw3Nl0srLY0VlTvC4V6RL50pcEymjy6qyJAaLa0= -+github.com/schollz/progressbar/v3 v3.17.0 h1:Fv+vG6O6jnJwdjCelvfyYO7sF2jaUGQVmdH4CxcZdsQ= -+github.com/schollz/progressbar/v3 v3.17.0/go.mod h1:5H4fLgifX+KeQCsEJnZTOepgZLe1jFF1lpPXb68IJTA= - github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= - github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= - github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -@@ -2324,7 +2327,6 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= - golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= --golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= - golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= - 
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= - golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -@@ -2335,7 +2337,6 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= - golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= - golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= - golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= --golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= - golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= - golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= - golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -diff --git a/vendor/github.com/schollz/progressbar/v3/progressbar.go b/vendor/github.com/schollz/progressbar/v3/progressbar.go -index 56c147e813e24..251e5833fe84f 100644 ---- a/vendor/github.com/schollz/progressbar/v3/progressbar.go -+++ b/vendor/github.com/schollz/progressbar/v3/progressbar.go -@@ -45,17 +45,20 @@ type state struct { - isAltSaucerHead bool - - lastShown time.Time -- startTime time.Time -+ startTime time.Time // time when the progress bar start working - - counterTime time.Time - counterNumSinceLast int64 - counterLastTenRates []float64 -+ spinnerIdx int // the index of spinner - - maxLineWidth int - currentBytes float64 - finished bool - exit bool // Progress bar exit halfway - -+ details []string // details to show,only used when detail row is set to more than 0 -+ - rendered string - } - -@@ -80,6 +83,9 @@ type config struct { - showIterationsPerSecond bool - showIterationsCount bool - -+ // whether the progress bar should show the total bytes (e.g. 23/24 or 23/-, vs. just 23). -+ showTotalBytes bool -+ - // whether the progress bar should show elapsed time. - // always enabled if predictTime is true. - elapsedTime bool -@@ -103,6 +109,11 @@ type config struct { - // spinnerTypeOptionUsed remembers if the spinnerType was changed manually - spinnerTypeOptionUsed bool - -+ // spinnerChangeInterval the change interval of spinner -+ // if set this attribute to 0, the spinner only change when renderProgressBar was called -+ // for example, each time when Add() was called,which will call renderProgressBar function -+ spinnerChangeInterval time.Duration -+ - // spinner represents the spinner as a slice of string - spinner []string - -@@ -123,6 +134,9 @@ type config struct { - // showDescriptionAtLineEnd specifies whether description should be written at line end instead of line start - showDescriptionAtLineEnd bool - -+ // specifies how many rows of details to show,default value is 0 and no details will be shown -+ maxDetailRow int -+ - stdBuffer bytes.Buffer - } - -@@ -134,7 +148,45 @@ type Theme struct { - SaucerPadding string - BarStart string - BarEnd string --} -+ -+ // BarStartFilled is used after the Bar starts filling, if set. Otherwise, it defaults to BarStart. -+ BarStartFilled string -+ -+ // BarEndFilled is used once the Bar finishes, if set. Otherwise, it defaults to BarEnd. -+ BarEndFilled string -+} -+ -+var ( -+ // ThemeDefault is given by default (if not changed with OptionSetTheme), and it looks like ""|████ |"". -+ ThemeDefault = Theme{Saucer: ""█"", SaucerPadding: "" "", BarStart: ""|"", BarEnd: ""|""} -+ -+ // ThemeASCII is a predefined Theme that uses ASCII symbols. 
It looks like ""[===>...]"". -+ // Configure it with OptionSetTheme(ThemeASCII). -+ ThemeASCII = Theme{ -+ Saucer: ""="", -+ SaucerHead: "">"", -+ SaucerPadding: ""."", -+ BarStart: ""["", -+ BarEnd: ""]"", -+ } -+ -+ // ThemeUnicode is a predefined Theme that uses Unicode characters, displaying a graphic bar. -+ // It looks like """" (rendering will depend on font being used). -+ // It requires special symbols usually found in ""nerd fonts"" [2], or in Fira Code [1], and other sources. -+ // Configure it with OptionSetTheme(ThemeUnicode). -+ // -+ // [1] https://github.com/tonsky/FiraCode -+ // [2] https://www.nerdfonts.com/ -+ ThemeUnicode = Theme{ -+ Saucer: ""\uEE04"", //  -+ SaucerHead: ""\uEE04"", //  -+ SaucerPadding: ""\uEE01"", //  -+ BarStart: ""\uEE00"", //  -+ BarStartFilled: ""\uEE03"", //  -+ BarEnd: ""\uEE02"", //  -+ BarEndFilled: ""\uEE05"", //  -+ } -+) - - // Option is the type all options need to adhere to - type Option func(p *ProgressBar) -@@ -146,6 +198,18 @@ func OptionSetWidth(s int) Option { - } - } - -+// OptionSetSpinnerChangeInterval sets the spinner change interval -+// the spinner will change according to this value. -+// By default, this value is 100 * time.Millisecond -+// If you don't want to let this progressbar update by specified time interval -+// you can set this value to zero, then the spinner will change each time rendered, -+// such as when Add() or Describe() was called -+func OptionSetSpinnerChangeInterval(interval time.Duration) Option { -+ return func(p *ProgressBar) { -+ p.config.spinnerChangeInterval = interval -+ } -+} -+ - // OptionSpinnerType sets the type of spinner used for indeterminate bars - func OptionSpinnerType(spinnerType int) Option { - return func(p *ProgressBar) { -@@ -162,7 +226,8 @@ func OptionSpinnerCustom(spinner []string) Option { - } - } - --// OptionSetTheme sets the elements the bar is constructed of -+// OptionSetTheme sets the elements the bar is constructed with. -+// There are two pre-defined themes you can use: ThemeASCII and ThemeUnicode. - func OptionSetTheme(t Theme) Option { - return func(p *ProgressBar) { - p.config.theme = t -@@ -240,13 +305,20 @@ func OptionShowIts() Option { - } - } - --// OptionShowElapsedOnFinish will keep the display of elapsed time on finish -+// OptionShowElapsedTimeOnFinish will keep the display of elapsed time on finish. - func OptionShowElapsedTimeOnFinish() Option { - return func(p *ProgressBar) { - p.config.showElapsedTimeOnFinish = true - } - } - -+// OptionShowTotalBytes will keep the display of total bytes. -+func OptionShowTotalBytes(flag bool) Option { -+ return func(p *ProgressBar) { -+ p.config.showTotalBytes = flag -+ } -+} -+ - // OptionSetItsString sets what's displayed for iterations a second. The default is ""it"" which would display: ""it/s"" - func OptionSetItsString(iterationString string) Option { - return func(p *ProgressBar) { -@@ -262,7 +334,7 @@ func OptionThrottle(duration time.Duration) Option { - } - } - --// OptionClearOnFinish will clear the bar once its finished -+// OptionClearOnFinish will clear the bar once its finished. 
- func OptionClearOnFinish() Option { - return func(p *ProgressBar) { - p.config.clearOnFinish = true -@@ -308,7 +380,13 @@ func OptionShowDescriptionAtLineEnd() Option { - } - } - --var defaultTheme = Theme{Saucer: ""█"", SaucerPadding: "" "", BarStart: ""|"", BarEnd: ""|""} -+// OptionSetMaxDetailRow sets the max row of details -+// the row count should be less than the terminal height, otherwise it will not give you the output you want -+func OptionSetMaxDetailRow(row int) Option { -+ return func(p *ProgressBar) { -+ p.config.maxDetailRow = row -+ } -+} - - // NewOptions constructs a new instance of ProgressBar, with any options you specify - func NewOptions(max int, options ...Option) *ProgressBar { -@@ -318,18 +396,24 @@ func NewOptions(max int, options ...Option) *ProgressBar { - // NewOptions64 constructs a new instance of ProgressBar, with any options you specify - func NewOptions64(max int64, options ...Option) *ProgressBar { - b := ProgressBar{ -- state: getBasicState(), -+ state: state{ -+ startTime: time.Time{}, -+ lastShown: time.Time{}, -+ counterTime: time.Time{}, -+ }, - config: config{ -- writer: os.Stdout, -- theme: defaultTheme, -- iterationString: ""it"", -- width: 40, -- max: max, -- throttleDuration: 0 * time.Nanosecond, -- elapsedTime: max == -1, -- predictTime: true, -- spinnerType: 9, -- invisible: false, -+ writer: os.Stdout, -+ theme: ThemeDefault, -+ iterationString: ""it"", -+ width: 40, -+ max: max, -+ throttleDuration: 0 * time.Nanosecond, -+ elapsedTime: max == -1, -+ predictTime: true, -+ spinnerType: 9, -+ invisible: false, -+ spinnerChangeInterval: 100 * time.Millisecond, -+ showTotalBytes: true, - }, - } - -@@ -341,11 +425,13 @@ func NewOptions64(max int64, options ...Option) *ProgressBar { - panic(""invalid spinner type, must be between 0 and 75"") - } - -+ if b.config.maxDetailRow < 0 { -+ panic(""invalid max detail row, must be greater than 0"") -+ } -+ - // ignoreLength if max bytes not known - if b.config.max == -1 { -- b.config.ignoreLength = true -- b.config.max = int64(b.config.width) -- b.config.predictTime = false -+ b.lengthUnknown() - } - - b.config.maxHumanized, b.config.maxHumanizedSuffix = humanizeBytes(float64(b.config.max), -@@ -355,6 +441,27 @@ func NewOptions64(max int64, options ...Option) *ProgressBar { - b.RenderBlank() - } - -+ // if the render time interval attribute is set -+ if b.config.spinnerChangeInterval != 0 && !b.config.invisible && b.config.ignoreLength { -+ go func() { -+ ticker := time.NewTicker(b.config.spinnerChangeInterval) -+ defer ticker.Stop() -+ for { -+ select { -+ case <-ticker.C: -+ if b.IsFinished() { -+ return -+ } -+ if b.IsStarted() { -+ b.lock.Lock() -+ b.render() -+ b.lock.Unlock() -+ } -+ } -+ } -+ }() -+ } -+ - return &b - } - -@@ -386,6 +493,7 @@ func DefaultBytes(maxBytes int64, description ...string) *ProgressBar { - OptionSetDescription(desc), - OptionSetWriter(os.Stderr), - OptionShowBytes(true), -+ OptionShowTotalBytes(true), - OptionSetWidth(10), - OptionThrottle(65*time.Millisecond), - OptionShowCount(), -@@ -412,6 +520,7 @@ func DefaultBytesSilent(maxBytes int64, description ...string) *ProgressBar { - OptionSetDescription(desc), - OptionSetWriter(io.Discard), - OptionShowBytes(true), -+ OptionShowTotalBytes(true), - OptionSetWidth(10), - OptionThrottle(65*time.Millisecond), - OptionShowCount(), -@@ -432,6 +541,7 @@ func Default(max int64, description ...string) *ProgressBar { - OptionSetDescription(desc), - OptionSetWriter(os.Stderr), - OptionSetWidth(10), -+ OptionShowTotalBytes(true), - 
OptionThrottle(65*time.Millisecond), - OptionShowCount(), - OptionShowIts(), -@@ -458,6 +568,7 @@ func DefaultSilent(max int64, description ...string) *ProgressBar { - OptionSetDescription(desc), - OptionSetWriter(io.Discard), - OptionSetWidth(10), -+ OptionShowTotalBytes(true), - OptionThrottle(65*time.Millisecond), - OptionShowCount(), - OptionShowIts(), -@@ -486,6 +597,24 @@ func (p *ProgressBar) RenderBlank() error { - return p.render() - } - -+// StartWithoutRender will start the progress bar without rendering it -+// this method is created for the use case where you want to start the progress -+// but don't want to render it immediately. -+// If you want to start the progress and render it immediately, use RenderBlank instead, -+// or maybe you can use Add to start it automatically, but it will make the time calculation less precise. -+func (p *ProgressBar) StartWithoutRender() { -+ p.lock.Lock() -+ defer p.lock.Unlock() -+ -+ if p.IsStarted() { -+ return -+ } -+ -+ p.state.startTime = time.Now() -+ // the counterTime should be set to the current time -+ p.state.counterTime = time.Now() -+} -+ - // Reset will reset the clock that is used - // to calculate current time and the time left. - func (p *ProgressBar) Reset() { -@@ -567,6 +696,10 @@ func (p *ProgressBar) Add64(num int64) error { - - p.state.currentBytes += float64(num) - -+ if p.state.counterTime.IsZero() { -+ p.state.counterTime = time.Now() -+ } -+ - // reset the countdown timer every second to take rolling average - p.state.counterNumSinceLast += num - if time.Since(p.state.counterTime).Seconds() > 0.5 { -@@ -596,6 +729,66 @@ func (p *ProgressBar) Add64(num int64) error { - return nil - } - -+// AddDetail adds a detail to the progress bar. Only used when maxDetailRow is set to a value greater than 0 -+func (p *ProgressBar) AddDetail(detail string) error { -+ if p.config.maxDetailRow == 0 { -+ return errors.New(""maxDetailRow is set to 0, cannot add detail"") -+ } -+ if p.IsFinished() { -+ return errors.New(""cannot add detail to a finished progress bar"") -+ } -+ -+ p.lock.Lock() -+ defer p.lock.Unlock() -+ if p.state.details == nil { -+ // if we add a detail before the first add, it will be weird that we have detail but don't have the progress bar in the top. -+ // so when we add the first detail, we will render the progress bar first. 
-+ if err := p.render(); err != nil { -+ return err -+ } -+ } -+ p.state.details = append(p.state.details, detail) -+ if len(p.state.details) > p.config.maxDetailRow { -+ p.state.details = p.state.details[1:] -+ } -+ if err := p.renderDetails(); err != nil { -+ return err -+ } -+ return nil -+} -+ -+// renderDetails renders the details of the progress bar -+func (p *ProgressBar) renderDetails() error { -+ if p.config.invisible { -+ return nil -+ } -+ if p.state.finished { -+ return nil -+ } -+ if p.config.maxDetailRow == 0 { -+ return nil -+ } -+ -+ b := strings.Builder{} -+ b.WriteString(""\n"") -+ -+ // render the details row -+ for _, detail := range p.state.details { -+ b.WriteString(fmt.Sprintf(""\u001B[K\r%s\n"", detail)) -+ } -+ // add empty lines to fill the maxDetailRow -+ for i := len(p.state.details); i < p.config.maxDetailRow; i++ { -+ b.WriteString(""\u001B[K\n"") -+ } -+ -+ // move the cursor up to the start of the details row -+ b.WriteString(fmt.Sprintf(""\u001B[%dF"", p.config.maxDetailRow+1)) -+ -+ writeString(p.config, b.String()) -+ -+ return nil -+} -+ - // Clear erases the progress bar from the current line - func (p *ProgressBar) Clear() error { - return clearProgressBar(p.config, p.state) -@@ -656,6 +849,7 @@ func (p *ProgressBar) ChangeMax64(newMax int64) { - p.config.useIECUnits) - } - -+ p.lengthKnown(newMax) - p.lock.Unlock() // so p.Add can lock - - p.Add(0) // re-render -@@ -669,22 +863,31 @@ func (p *ProgressBar) IsFinished() bool { - return p.state.finished - } - -+// IsStarted returns true if progress bar is started -+func (p *ProgressBar) IsStarted() bool { -+ return !p.state.startTime.IsZero() -+} -+ - // render renders the progress bar, updating the maximum - // rendered line width. this function is not thread-safe, - // so it must be called with an acquired lock. - func (p *ProgressBar) render() error { - // make sure that the rendering is not happening too quickly - // but always show if the currentNum reaches the max -- if time.Since(p.state.lastShown).Nanoseconds() < p.config.throttleDuration.Nanoseconds() && -+ if !p.IsStarted() { -+ p.state.startTime = time.Now() -+ } else if time.Since(p.state.lastShown).Nanoseconds() < p.config.throttleDuration.Nanoseconds() && - p.state.currentNum < p.config.max { - return nil - } - - if !p.config.useANSICodes { -- // first, clear the existing progress bar -- err := clearProgressBar(p.config, p.state) -- if err != nil { -- return err -+ // first, clear the existing progress bar, if not yet finished. 
-+ if !p.state.finished { -+ err := clearProgressBar(p.config, p.state) -+ if err != nil { -+ return err -+ } - } - } - -@@ -695,6 +898,11 @@ func (p *ProgressBar) render() error { - io.Copy(p.config.writer, &p.config.stdBuffer) - renderProgressBar(p.config, &p.state) - } -+ if p.config.maxDetailRow > 0 { -+ p.renderDetails() -+ // put the cursor back to the last line of the details -+ writeString(p.config, fmt.Sprintf(""\u001B[%dB\r\u001B[%dC"", p.config.maxDetailRow, len(p.state.details[len(p.state.details)-1]))) -+ } - if p.config.onCompletion != nil { - p.config.onCompletion() - } -@@ -726,6 +934,20 @@ func (p *ProgressBar) render() error { - return nil - } - -+// lengthUnknown sets the progress bar to ignore the length -+func (p *ProgressBar) lengthUnknown() { -+ p.config.ignoreLength = true -+ p.config.max = int64(p.config.width) -+ p.config.predictTime = false -+} -+ -+// lengthKnown sets the progress bar to do not ignore the length -+func (p *ProgressBar) lengthKnown(max int64) { -+ p.config.ignoreLength = false -+ p.config.max = max -+ p.config.predictTime = true -+} -+ - // State returns the current state - func (p *ProgressBar) State() State { - p.lock.Lock() -@@ -738,7 +960,12 @@ func (p *ProgressBar) State() State { - } - s.CurrentPercent = float64(p.state.currentNum) / float64(p.config.max) - s.CurrentBytes = p.state.currentBytes -- s.SecondsSince = time.Since(p.state.startTime).Seconds() -+ if p.IsStarted() { -+ s.SecondsSince = time.Since(p.state.startTime).Seconds() -+ } else { -+ s.SecondsSince = 0 -+ } -+ - if p.state.currentNum > 0 { - s.SecondsLeft = s.SecondsSince / float64(p.state.currentNum) * (float64(p.config.max) - float64(p.state.currentNum)) - } -@@ -798,21 +1025,32 @@ func renderProgressBar(c config, s *state) (int, error) { - if c.showBytes { - currentHumanize, currentSuffix := humanizeBytes(s.currentBytes, c.useIECUnits) - if currentSuffix == c.maxHumanizedSuffix { -- sb.WriteString(fmt.Sprintf(""%s/%s%s"", -- currentHumanize, c.maxHumanized, c.maxHumanizedSuffix)) -- } else { -+ if c.showTotalBytes { -+ sb.WriteString(fmt.Sprintf(""%s/%s%s"", -+ currentHumanize, c.maxHumanized, c.maxHumanizedSuffix)) -+ } else { -+ sb.WriteString(fmt.Sprintf(""%s%s"", -+ currentHumanize, c.maxHumanizedSuffix)) -+ } -+ } else if c.showTotalBytes { - sb.WriteString(fmt.Sprintf(""%s%s/%s%s"", - currentHumanize, currentSuffix, c.maxHumanized, c.maxHumanizedSuffix)) -+ } else { -+ sb.WriteString(fmt.Sprintf(""%s%s"", currentHumanize, currentSuffix)) - } -- } else { -+ } else if c.showTotalBytes { - sb.WriteString(fmt.Sprintf(""%.0f/%d"", s.currentBytes, c.max)) -+ } else { -+ sb.WriteString(fmt.Sprintf(""%.0f"", s.currentBytes)) - } - } else { - if c.showBytes { - currentHumanize, currentSuffix := humanizeBytes(s.currentBytes, c.useIECUnits) - sb.WriteString(fmt.Sprintf(""%s%s"", currentHumanize, currentSuffix)) -- } else { -+ } else if c.showTotalBytes { - sb.WriteString(fmt.Sprintf(""%.0f/%s"", s.currentBytes, ""-"")) -+ } else { -+ sb.WriteString(fmt.Sprintf(""%.0f"", s.currentBytes)) - } - } - } -@@ -848,6 +1086,10 @@ func renderProgressBar(c config, s *state) (int, error) { - } - - leftBrac, rightBrac, saucer, saucerHead := """", """", """", """" -+ barStart, barEnd := c.theme.BarStart, c.theme.BarEnd -+ if s.finished && c.theme.BarEndFilled != """" { -+ barEnd = c.theme.BarEndFilled -+ } - - // show time prediction in ""current/total"" seconds format - switch { -@@ -884,6 +1126,9 @@ func renderProgressBar(c config, s *state) (int, error) { - c.width = width - 
getStringWidth(c, c.description, true) - 10 - amend - sb.Len() - len(leftBrac) - len(rightBrac) - s.currentSaucerSize = int(float64(s.currentPercent) / 100.0 * float64(c.width)) - } -+ if (s.currentSaucerSize > 0 || s.currentPercent > 0) && c.theme.BarStartFilled != """" { -+ barStart = c.theme.BarStartFilled -+ } - if s.currentSaucerSize > 0 { - if c.ignoreLength { - saucer = strings.Repeat(c.theme.SaucerPadding, s.currentSaucerSize-1) -@@ -925,7 +1170,16 @@ func renderProgressBar(c config, s *state) (int, error) { - if len(c.spinner) > 0 { - selectedSpinner = c.spinner - } -- spinner := selectedSpinner[int(math.Round(math.Mod(float64(time.Since(s.startTime).Milliseconds()/100), float64(len(selectedSpinner)))))] -+ -+ var spinner string -+ if c.spinnerChangeInterval != 0 { -+ // if the spinner is changed according to an interval, calculate it -+ spinner = selectedSpinner[int(math.Round(math.Mod(float64(time.Since(s.startTime).Nanoseconds()/c.spinnerChangeInterval.Nanoseconds()), float64(len(selectedSpinner)))))] -+ } else { -+ // if the spinner is changed according to the number render was called -+ spinner = selectedSpinner[s.spinnerIdx] -+ s.spinnerIdx = (s.spinnerIdx + 1) % len(selectedSpinner) -+ } - if c.elapsedTime { - if c.showDescriptionAtLineEnd { - str = fmt.Sprintf(""\r%s %s [%s] %s "", -@@ -956,11 +1210,11 @@ func renderProgressBar(c config, s *state) (int, error) { - } else if rightBrac == """" { - str = fmt.Sprintf(""%4d%% %s%s%s%s%s %s"", - s.currentPercent, -- c.theme.BarStart, -+ barStart, - saucer, - saucerHead, - strings.Repeat(c.theme.SaucerPadding, repeatAmount), -- c.theme.BarEnd, -+ barEnd, - sb.String()) - if (s.currentPercent == 100 && c.showElapsedTimeOnFinish) || c.elapsedTime { - str = fmt.Sprintf(""%s [%s]"", str, leftBrac) -@@ -975,11 +1229,11 @@ func renderProgressBar(c config, s *state) (int, error) { - if s.currentPercent == 100 { - str = fmt.Sprintf(""%4d%% %s%s%s%s%s %s"", - s.currentPercent, -- c.theme.BarStart, -+ barStart, - saucer, - saucerHead, - strings.Repeat(c.theme.SaucerPadding, repeatAmount), -- c.theme.BarEnd, -+ barEnd, - sb.String()) - - if c.showElapsedTimeOnFinish { -@@ -994,11 +1248,11 @@ func renderProgressBar(c config, s *state) (int, error) { - } else { - str = fmt.Sprintf(""%4d%% %s%s%s%s%s %s [%s:%s]"", - s.currentPercent, -- c.theme.BarStart, -+ barStart, - saucer, - saucerHead, - strings.Repeat(c.theme.SaucerPadding, repeatAmount), -- c.theme.BarEnd, -+ barEnd, - sb.String(), - leftBrac, - rightBrac) -@@ -1153,6 +1407,8 @@ func shouldCacheOutput(pb *ProgressBar) bool { - } - - func Bprintln(pb *ProgressBar, a ...interface{}) (int, error) { -+ pb.lock.Lock() -+ defer pb.lock.Unlock() - if !shouldCacheOutput(pb) { - return fmt.Fprintln(pb.config.writer, a...) - } else { -@@ -1161,6 +1417,8 @@ func Bprintln(pb *ProgressBar, a ...interface{}) (int, error) { - } - - func Bprintf(pb *ProgressBar, format string, a ...interface{}) (int, error) { -+ pb.lock.Lock() -+ defer pb.lock.Unlock() - if !shouldCacheOutput(pb) { - return fmt.Fprintf(pb.config.writer, format, a...) 
- } else { -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 4ab7c446ea315..9b50cc3544026 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -1539,8 +1539,8 @@ github.com/rivo/uniseg - # github.com/rs/xid v1.6.0 - ## explicit; go 1.16 - github.com/rs/xid --# github.com/schollz/progressbar/v3 v3.14.6 --## explicit; go 1.13 -+# github.com/schollz/progressbar/v3 v3.17.0 -+## explicit; go 1.22 - github.com/schollz/progressbar/v3 - # github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 - ## explicit",fix,"update module github.com/schollz/progressbar/v3 to v3.17.0 (#14720) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -ce574485cf4aef74ea6e548086740caec55d5c9e,2024-01-04 17:42:50,Kaviraj Kanagaraj,"feat(caching): Support caching `/series` and `/labels` query results (#11539) - -**What this PR does / why we need it**: -Add support for caching metadata queries (both series and labels). -caching happens after splitting similar to other types of queries. - -This pr adds the following configs to enable them. -``` -cache_series_results: true|false (default false) -cache_label_results: true|false (default false) -``` -And the cache backend for them can be configured using -`series_results_cache` and `label_results_cache` blocks under the -`query_range` section. - -Currently the split interval for metadata queries is fixed and defaults -to 24h, this pr makes it configurable by introducing -`split_metadata_queries_by_interval` - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added -- [x] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. -[Example -PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) - ---------- - -Signed-off-by: Kaviraj -Co-authored-by: Ashwanth Goli ",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index bd2ae7c12de55..4002d76c032e9 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -42,6 +42,7 @@ - * [10956](https://github.com/grafana/loki/pull/10956) **jeschkies** do not wrap requests but send pure Protobuf from frontend v2 via scheduler to querier when `-frontend.encoding=protobuf`. - * [10417](https://github.com/grafana/loki/pull/10417) **jeschkies** shard `quantile_over_time` range queries using probabilistic data structures. - * [11284](https://github.com/grafana/loki/pull/11284) **ashwanthgoli** Config: Adds `frontend.max-query-capacity` to tune per-tenant query capacity. 
-+* [11539](https://github.com/grafana/loki/pull/11539) **kaviraj,ashwanthgoli** Support caching /series and /labels query results - * [11545](https://github.com/grafana/loki/pull/11545) **dannykopping** Force correct memcached timeout when fetching chunks. - - ##### Fixes -diff --git a/cmd/loki/loki-local-with-memcached.yaml b/cmd/loki/loki-local-with-memcached.yaml -new file mode 100644 -index 0000000000000..d1b0ae1c2493c ---- /dev/null -+++ b/cmd/loki/loki-local-with-memcached.yaml -@@ -0,0 +1,87 @@ -+auth_enabled: false -+ -+server: -+ http_listen_port: 3100 -+ grpc_listen_port: 9096 -+ -+common: -+ instance_addr: 127.0.0.1 -+ path_prefix: /tmp/loki -+ storage: -+ filesystem: -+ chunks_directory: /tmp/loki/chunks -+ rules_directory: /tmp/loki/rules -+ replication_factor: 1 -+ ring: -+ kvstore: -+ store: inmemory -+ -+query_range: -+ align_queries_with_step: true -+ cache_index_stats_results: true -+ cache_results: true -+ cache_volume_results: true -+ cache_series_results: true -+ series_results_cache: -+ cache: -+ default_validity: 12h -+ memcached_client: -+ consistent_hash: true -+ addresses: ""dns+localhost:11211"" -+ max_idle_conns: 16 -+ timeout: 500ms -+ update_interval: 1m -+ index_stats_results_cache: -+ cache: -+ default_validity: 12h -+ memcached_client: -+ consistent_hash: true -+ addresses: ""dns+localhost:11211"" -+ max_idle_conns: 16 -+ timeout: 500ms -+ update_interval: 1m -+ max_retries: 5 -+ results_cache: -+ cache: -+ default_validity: 12h -+ memcached_client: -+ consistent_hash: true -+ addresses: ""dns+localhost:11211"" -+ max_idle_conns: 16 -+ timeout: 500ms -+ update_interval: 1m -+ volume_results_cache: -+ cache: -+ default_validity: 12h -+ memcached_client: -+ consistent_hash: true -+ addresses: ""dns+localhost:11211"" -+ max_idle_conns: 16 -+ timeout: 500ms -+ update_interval: 1m -+ -+schema_config: -+ configs: -+ - from: 2020-10-24 -+ store: tsdb -+ object_store: filesystem -+ schema: v12 -+ index: -+ prefix: index_ -+ period: 24h -+ -+ruler: -+ alertmanager_url: http://localhost:9093 -+ -+# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration -+# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/ -+# -+# Statistics help us better understand how Loki is used, and they show us performance -+# levels for most users. This helps us prioritize features and documentation. -+# For more information on what's sent, look at -+# https://github.com/grafana/loki/blob/main/pkg/analytics/stats.go -+# Refer to the buildReport method to see what goes into a report. -+# -+# If you would like to disable reporting, uncomment the following lines: -+#analytics: -+# reporting_enabled: false -diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md -index 898ca025ee97d..6998635c90cee 100644 ---- a/docs/sources/configure/_index.md -+++ b/docs/sources/configure/_index.md -@@ -880,6 +880,40 @@ volume_results_cache: - # compression. Supported values are: 'snappy' and ''. - # CLI flag: -frontend.volume-results-cache.compression - [compression: | default = """"] -+ -+# Cache series query results. -+# CLI flag: -querier.cache-series-results -+[cache_series_results: | default = false] -+ -+# If series_results_cache is not configured and cache_series_results is true, -+# the config for the results cache is used. -+series_results_cache: -+ # The cache block configures the cache backend. 
-+ # The CLI flags prefix for this block configuration is: -+ # frontend.series-results-cache -+ [cache: ] -+ -+ # Use compression in cache. The default is an empty value '', which disables -+ # compression. Supported values are: 'snappy' and ''. -+ # CLI flag: -frontend.series-results-cache.compression -+ [compression: | default = """"] -+ -+# Cache label query results. -+# CLI flag: -querier.cache-label-results -+[cache_label_results: | default = false] -+ -+# If label_results_cache is not configured and cache_label_results is true, the -+# config for the results cache is used. -+label_results_cache: -+ # The cache block configures the cache backend. -+ # The CLI flags prefix for this block configuration is: -+ # frontend.label-results-cache -+ [cache: ] -+ -+ # Use compression in cache. The default is an empty value '', which disables -+ # compression. Supported values are: 'snappy' and ''. -+ # CLI flag: -frontend.label-results-cache.compression -+ [compression: | default = """"] - ``` - - ### ruler -@@ -2844,6 +2878,12 @@ The `limits_config` block configures global and per-tenant limits in Loki. - # CLI flag: -querier.split-queries-by-interval - [split_queries_by_interval: | default = 1h] - -+# Split metadata queries by a time interval and execute in parallel. The value 0 -+# disables splitting metadata queries by time. This also determines how cache -+# keys are chosen when label/series result caching is enabled. -+# CLI flag: -querier.split-metadata-queries-by-interval -+[split_metadata_queries_by_interval: | default = 1d] -+ - # Limit queries that can be sharded. Queries within the time range of now and - # now minus this sharding lookback are not sharded. The default value of 0s - # disables the lookback, causing sharding of all queries at all times. -@@ -4283,6 +4323,8 @@ The cache block configures the cache backend. 
The supported CLI flags `` - - `bloom-gateway-client.cache` - - `frontend` - - `frontend.index-stats-results-cache` -+- `frontend.label-results-cache` -+- `frontend.series-results-cache` - - `frontend.volume-results-cache` - - `store.chunks-cache` - - `store.index-cache-read` -diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go -index e86a85ba7bc9b..0a24e74145369 100644 ---- a/pkg/logql/metrics.go -+++ b/pkg/logql/metrics.go -@@ -222,6 +222,10 @@ func RecordLabelQueryMetrics( - ""query"", query, - ""query_hash"", util.HashedQuery(query), - ""total_entries"", stats.Summary.TotalEntriesReturned, -+ ""cache_label_results_req"", stats.Caches.LabelResult.EntriesRequested, -+ ""cache_label_results_hit"", stats.Caches.LabelResult.EntriesFound, -+ ""cache_label_results_stored"", stats.Caches.LabelResult.EntriesStored, -+ ""cache_label_results_download_time"", stats.Caches.LabelResult.CacheDownloadTime(), - ) - - execLatency.WithLabelValues(status, queryType, """").Observe(stats.Summary.ExecTime) -@@ -272,7 +276,12 @@ func RecordSeriesQueryMetrics(ctx context.Context, log log.Logger, start, end ti - ""status"", status, - ""match"", PrintMatches(match), - ""query_hash"", util.HashedQuery(PrintMatches(match)), -- ""total_entries"", stats.Summary.TotalEntriesReturned) -+ ""total_entries"", stats.Summary.TotalEntriesReturned, -+ ""cache_series_results_req"", stats.Caches.SeriesResult.EntriesRequested, -+ ""cache_series_results_hit"", stats.Caches.SeriesResult.EntriesFound, -+ ""cache_series_results_stored"", stats.Caches.SeriesResult.EntriesStored, -+ ""cache_series_results_download_time"", stats.Caches.SeriesResult.CacheDownloadTime(), -+ ) - - if shard != nil { - logValues = append(logValues, -diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go -index 6d07040bb802a..efaead9afd0dc 100644 ---- a/pkg/logql/metrics_test.go -+++ b/pkg/logql/metrics_test.go -@@ -106,10 +106,18 @@ func TestLogLabelsQuery(t *testing.T) { - TotalBytesProcessed: 100000, - TotalEntriesReturned: 12, - }, -+ Caches: stats.Caches{ -+ LabelResult: stats.Cache{ -+ EntriesRequested: 2, -+ EntriesFound: 1, -+ EntriesStored: 1, -+ DownloadTime: 80, -+ }, -+ }, - }) - require.Regexp(t, - fmt.Sprintf( -- ""level=info org_id=foo traceID=%s sampled=true latency=slow query_type=labels splits=0 start=.* end=.* start_delta=1h0m0.* end_delta=.* length=1h0m0s duration=25.25s status=200 label=foo query= query_hash=2166136261 total_entries=12\n"", -+ ""level=info org_id=foo traceID=%s sampled=true latency=slow query_type=labels splits=0 start=.* end=.* start_delta=1h0m0.* end_delta=.* length=1h0m0s duration=25.25s status=200 label=foo query= query_hash=2166136261 total_entries=12 cache_label_results_req=2 cache_label_results_hit=1 cache_label_results_stored=1 cache_label_results_download_time=80ns\n"", - sp.Context().(jaeger.SpanContext).SpanID().String(), - ), - buf.String()) -@@ -132,10 +140,18 @@ func TestLogSeriesQuery(t *testing.T) { - TotalBytesProcessed: 100000, - TotalEntriesReturned: 10, - }, -+ Caches: stats.Caches{ -+ SeriesResult: stats.Cache{ -+ EntriesRequested: 2, -+ EntriesFound: 1, -+ EntriesStored: 1, -+ DownloadTime: 80, -+ }, -+ }, - }) - require.Regexp(t, - fmt.Sprintf( -- ""level=info org_id=foo traceID=%s sampled=true latency=slow query_type=series splits=0 start=.* end=.* start_delta=1h0m0.* end_delta=.* length=1h0m0s duration=25.25s status=200 match=\""{container_name=.*\""}:{app=.*}\"" query_hash=23523089 total_entries=10\n"", -+ ""level=info org_id=foo traceID=%s sampled=true latency=slow 
query_type=series splits=0 start=.* end=.* start_delta=1h0m0.* end_delta=.* length=1h0m0s duration=25.25s status=200 match=\""{container_name=.*\""}:{app=.*}\"" query_hash=23523089 total_entries=10 cache_series_results_req=2 cache_series_results_hit=1 cache_series_results_stored=1 cache_series_results_download_time=80ns\n"", - sp.Context().(jaeger.SpanContext).SpanID().String(), - ), - buf.String()) -diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go -index 187bd88763a2d..518e7effb59e8 100644 ---- a/pkg/logqlmodel/stats/context.go -+++ b/pkg/logqlmodel/stats/context.go -@@ -61,6 +61,8 @@ const ( - StatsResultCache = ""stats-result"" - VolumeResultCache = ""volume-result"" - WriteDedupeCache = ""write-dedupe"" -+ SeriesResultCache = ""series-result"" -+ LabelResultCache = ""label-result"" - BloomFilterCache = ""bloom-filter"" - BloomBlocksCache = ""bloom-blocks"" - ) -@@ -100,6 +102,8 @@ func (c *Context) Caches() Caches { - Result: c.caches.Result, - StatsResult: c.caches.StatsResult, - VolumeResult: c.caches.VolumeResult, -+ SeriesResult: c.caches.SeriesResult, -+ LabelResult: c.caches.LabelResult, - } - } - -@@ -215,6 +219,8 @@ func (c *Caches) Merge(m Caches) { - c.Result.Merge(m.Result) - c.StatsResult.Merge(m.StatsResult) - c.VolumeResult.Merge(m.VolumeResult) -+ c.SeriesResult.Merge(m.SeriesResult) -+ c.LabelResult.Merge(m.LabelResult) - } - - func (c *Cache) Merge(m Cache) { -@@ -444,6 +450,10 @@ func (c *Context) getCacheStatsByType(t CacheType) *Cache { - stats = &c.caches.StatsResult - case VolumeResultCache: - stats = &c.caches.VolumeResult -+ case SeriesResultCache: -+ stats = &c.caches.SeriesResult -+ case LabelResultCache: -+ stats = &c.caches.LabelResult - default: - return nil - } -@@ -526,6 +536,18 @@ func (c Caches) Log(log log.Logger) { - ""Cache.VolumeResult.EntriesStored"", c.VolumeResult.EntriesStored, - ""Cache.VolumeResult.BytesSent"", humanize.Bytes(uint64(c.VolumeResult.BytesSent)), - ""Cache.VolumeResult.BytesReceived"", humanize.Bytes(uint64(c.VolumeResult.BytesReceived)), -+ ""Cache.SeriesResult.Requests"", c.SeriesResult.Requests, -+ ""Cache.SeriesResult.EntriesRequested"", c.SeriesResult.EntriesRequested, -+ ""Cache.SeriesResult.EntriesFound"", c.SeriesResult.EntriesFound, -+ ""Cache.SeriesResult.EntriesStored"", c.SeriesResult.EntriesStored, -+ ""Cache.SeriesResult.BytesSent"", humanize.Bytes(uint64(c.SeriesResult.BytesSent)), -+ ""Cache.SeriesResult.BytesReceived"", humanize.Bytes(uint64(c.SeriesResult.BytesReceived)), -+ ""Cache.LabelResult.Requests"", c.LabelResult.Requests, -+ ""Cache.LabelResult.EntriesRequested"", c.LabelResult.EntriesRequested, -+ ""Cache.LabelResult.EntriesFound"", c.LabelResult.EntriesFound, -+ ""Cache.LabelResult.EntriesStored"", c.LabelResult.EntriesStored, -+ ""Cache.LabelResult.BytesSent"", humanize.Bytes(uint64(c.LabelResult.BytesSent)), -+ ""Cache.LabelResult.BytesReceived"", humanize.Bytes(uint64(c.LabelResult.BytesReceived)), - ""Cache.Result.DownloadTime"", c.Result.CacheDownloadTime(), - ""Cache.Result.Requests"", c.Result.Requests, - ""Cache.Result.EntriesRequested"", c.Result.EntriesRequested, -diff --git a/pkg/logqlmodel/stats/stats.pb.go b/pkg/logqlmodel/stats/stats.pb.go -index af008968ebd4a..7d2df4df33233 100644 ---- a/pkg/logqlmodel/stats/stats.pb.go -+++ b/pkg/logqlmodel/stats/stats.pb.go -@@ -100,6 +100,8 @@ type Caches struct { - Result Cache `protobuf:""bytes,3,opt,name=result,proto3"" json:""result""` - StatsResult Cache `protobuf:""bytes,4,opt,name=statsResult,proto3"" 
json:""statsResult""` - VolumeResult Cache `protobuf:""bytes,5,opt,name=volumeResult,proto3"" json:""volumeResult""` -+ SeriesResult Cache `protobuf:""bytes,6,opt,name=seriesResult,proto3"" json:""seriesResult""` -+ LabelResult Cache `protobuf:""bytes,7,opt,name=labelResult,proto3"" json:""labelResult""` - } - - func (m *Caches) Reset() { *m = Caches{} } -@@ -169,6 +171,20 @@ func (m *Caches) GetVolumeResult() Cache { - return Cache{} - } - -+func (m *Caches) GetSeriesResult() Cache { -+ if m != nil { -+ return m.SeriesResult -+ } -+ return Cache{} -+} -+ -+func (m *Caches) GetLabelResult() Cache { -+ if m != nil { -+ return m.LabelResult -+ } -+ return Cache{} -+} -+ - // Summary is the summary of a query statistics. - type Summary struct { - // Total bytes processed per second. -@@ -749,80 +765,82 @@ func init() { - func init() { proto.RegisterFile(""pkg/logqlmodel/stats/stats.proto"", fileDescriptor_6cdfe5d2aea33ebb) } - - var fileDescriptor_6cdfe5d2aea33ebb = []byte{ -- // 1163 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0x4d, 0x6f, 0xe4, 0x44, -- 0x13, 0x1e, 0x27, 0xaf, 0x67, 0xb2, 0x9d, 0xcf, 0xed, 0x64, 0xdf, 0xf5, 0x82, 0x64, 0x87, 0x81, -- 0x15, 0x41, 0xa0, 0x8c, 0xf8, 0x90, 0x10, 0x88, 0x95, 0x90, 0xb3, 0x44, 0x8a, 0xb4, 0x2b, 0x42, -- 0x05, 0x2e, 0xdc, 0x1c, 0xbb, 0x33, 0x63, 0xc5, 0x63, 0x4f, 0xec, 0xf6, 0xb2, 0x39, 0xc1, 0x4f, -- 0xe0, 0x67, 0x70, 0xe1, 0x80, 0x38, 0x21, 0xf1, 0x03, 0xf6, 0x98, 0xe3, 0x9e, 0x2c, 0x32, 0xb9, -- 0x20, 0x9f, 0x56, 0xe2, 0x8e, 0x50, 0x57, 0xf7, 0xf8, 0x6b, 0x3c, 0xda, 0x5c, 0xc6, 0x5d, 0x4f, -- 0x3d, 0x4f, 0xf5, 0x67, 0x55, 0xf7, 0x90, 0xdd, 0xc9, 0xf9, 0x70, 0x10, 0x44, 0xc3, 0x8b, 0x60, -- 0x1c, 0x79, 0x2c, 0x18, 0x24, 0xdc, 0xe1, 0x89, 0xfc, 0xdd, 0x9f, 0xc4, 0x11, 0x8f, 0xa8, 0x8e, -- 0xc6, 0x1b, 0x3b, 0xc3, 0x68, 0x18, 0x21, 0x32, 0x10, 0x2d, 0xe9, 0xec, 0xff, 0xa3, 0x91, 0x2e, -- 0xb0, 0x24, 0x0d, 0x38, 0xfd, 0x8c, 0xf4, 0x92, 0x74, 0x3c, 0x76, 0xe2, 0x4b, 0x43, 0xdb, 0xd5, -- 0xf6, 0x56, 0x3f, 0xda, 0xd8, 0x97, 0x61, 0x4e, 0x24, 0x6a, 0x6f, 0xbe, 0xc8, 0xac, 0x4e, 0x9e, -- 0x59, 0x33, 0x1a, 0xcc, 0x1a, 0x42, 0x7a, 0x91, 0xb2, 0xd8, 0x67, 0xb1, 0xb1, 0x54, 0x93, 0x7e, -- 0x23, 0xd1, 0x52, 0xaa, 0x68, 0x30, 0x6b, 0xd0, 0x47, 0x64, 0xc5, 0x0f, 0x87, 0x2c, 0xe1, 0x2c, -- 0x36, 0x96, 0x51, 0xbb, 0xa9, 0xb4, 0x47, 0x0a, 0xb6, 0xb7, 0x94, 0xb8, 0x20, 0x42, 0xd1, 0xa2, -- 0x9f, 0x90, 0xae, 0xeb, 0xb8, 0x23, 0x96, 0x18, 0xff, 0x43, 0xf1, 0xba, 0x12, 0x1f, 0x20, 0x68, -- 0xaf, 0x2b, 0xa9, 0x8e, 0x24, 0x50, 0xdc, 0xfe, 0x6f, 0x4b, 0xa4, 0x2b, 0x19, 0xf4, 0x43, 0xa2, -- 0xbb, 0xa3, 0x34, 0x3c, 0x57, 0x73, 0x5e, 0xab, 0xea, 0x2b, 0x72, 0x41, 0x01, 0xf9, 0x11, 0x12, -- 0x3f, 0xf4, 0xd8, 0x73, 0x35, 0xd7, 0x05, 0x12, 0xa4, 0x80, 0xfc, 0x88, 0x61, 0xc6, 0xb8, 0xca, -- 0x6a, 0x8e, 0x75, 0xcd, 0x86, 0xd2, 0x28, 0x0e, 0xa8, 0x2f, 0x3d, 0x20, 0xab, 0x48, 0x93, 0x1b, -- 0xa4, 0x66, 0x58, 0x97, 0x6e, 0x2b, 0x69, 0x95, 0x08, 0x55, 0x83, 0x1e, 0x92, 0xb5, 0x67, 0x51, -- 0x90, 0x8e, 0x99, 0x8a, 0xa2, 0xb7, 0x44, 0xd9, 0x51, 0x51, 0x6a, 0x4c, 0xa8, 0x59, 0xfd, 0x3f, -- 0xba, 0xa4, 0xa7, 0x4e, 0x02, 0xfd, 0x8e, 0xdc, 0x3f, 0xbd, 0xe4, 0x2c, 0x39, 0x8e, 0x23, 0x97, -- 0x25, 0x09, 0xf3, 0x8e, 0x59, 0x7c, 0xc2, 0xdc, 0x28, 0xf4, 0x70, 0x19, 0x97, 0xed, 0x37, 0xf3, -- 0xcc, 0x5a, 0x44, 0x81, 0x45, 0x0e, 0x11, 0x36, 0xf0, 0xc3, 0xd6, 0xb0, 0x4b, 0x65, 0xd8, 0x05, -- 0x14, 0x58, 0xe4, 0xa0, 0x47, 0x64, 0x9b, 0x47, 0xdc, 0x09, 0xec, 0x5a, 0xb7, 0xb8, 0x13, 0xcb, -- 0xf6, 0xfd, 0x3c, 0xb3, 0xda, 0xdc, 
0xd0, 0x06, 0x16, 0xa1, 0x9e, 0xd4, 0xba, 0xc2, 0x9d, 0xa9, -- 0x86, 0xaa, 0xbb, 0xa1, 0x0d, 0xa4, 0x7b, 0x64, 0x85, 0x3d, 0x67, 0xee, 0xb7, 0xfe, 0x98, 0xe1, -- 0x9e, 0x68, 0xf6, 0x9a, 0x38, 0xe3, 0x33, 0x0c, 0x8a, 0x16, 0x7d, 0x9f, 0xdc, 0xb9, 0x48, 0x59, -- 0xca, 0x90, 0xda, 0x45, 0xea, 0x7a, 0x9e, 0x59, 0x25, 0x08, 0x65, 0x93, 0xee, 0x13, 0x92, 0xa4, -- 0xa7, 0x32, 0xbb, 0x12, 0xa3, 0x87, 0x03, 0xdb, 0xc8, 0x33, 0xab, 0x82, 0x42, 0xa5, 0x4d, 0x9f, -- 0x90, 0x1d, 0x1c, 0xdd, 0x57, 0x21, 0x47, 0x1f, 0xe3, 0x69, 0x1c, 0x32, 0xcf, 0x58, 0x41, 0xa5, -- 0x91, 0x67, 0x56, 0xab, 0x1f, 0x5a, 0x51, 0xda, 0x27, 0xdd, 0x64, 0x12, 0xf8, 0x3c, 0x31, 0xee, -- 0xa0, 0x9e, 0x88, 0x53, 0x2d, 0x11, 0x50, 0x5f, 0xe4, 0x8c, 0x9c, 0xd8, 0x4b, 0x0c, 0x52, 0xe1, -- 0x20, 0x02, 0xea, 0x5b, 0x8c, 0xea, 0x38, 0x4a, 0xf8, 0xa1, 0x1f, 0x70, 0x16, 0xe3, 0xea, 0x19, -- 0xab, 0x8d, 0x51, 0x35, 0xfc, 0xd0, 0x8a, 0xd2, 0x1f, 0xc9, 0x43, 0xc4, 0x4f, 0x78, 0x9c, 0xba, -- 0x3c, 0x8d, 0x99, 0xf7, 0x94, 0x71, 0xc7, 0x73, 0xb8, 0xd3, 0x38, 0x12, 0x6b, 0x18, 0xfe, 0xbd, -- 0x3c, 0xb3, 0x6e, 0x27, 0x80, 0xdb, 0xd1, 0xfa, 0x5f, 0x90, 0x9e, 0xaa, 0x84, 0xa2, 0x78, 0x24, -- 0x3c, 0x8a, 0x59, 0xa3, 0xde, 0x9c, 0x08, 0xac, 0x2c, 0x1e, 0x48, 0x01, 0xf9, 0xe9, 0xff, 0xba, -- 0x44, 0x56, 0x8e, 0xca, 0x82, 0xb7, 0x86, 0x7d, 0x02, 0x13, 0x99, 0x2b, 0xf3, 0x4d, 0xb7, 0xb7, -- 0x44, 0xf2, 0x56, 0x71, 0xa8, 0x59, 0xf4, 0x90, 0x50, 0xb4, 0x0f, 0x44, 0x01, 0x4b, 0x9e, 0x3a, -- 0x1c, 0xb5, 0x32, 0xa9, 0xfe, 0x9f, 0x67, 0x56, 0x8b, 0x17, 0x5a, 0xb0, 0xa2, 0x77, 0x1b, 0xed, -- 0x44, 0xe5, 0x50, 0xd9, 0xbb, 0xc2, 0xa1, 0x66, 0xd1, 0xcf, 0xc9, 0x46, 0x99, 0x01, 0x27, 0x2c, -- 0xe4, 0x2a, 0x61, 0x68, 0x9e, 0x59, 0x0d, 0x0f, 0x34, 0xec, 0x72, 0xbd, 0xf4, 0x5b, 0xaf, 0xd7, -- 0x9f, 0xcb, 0x44, 0x47, 0x7f, 0xd1, 0xb1, 0x9c, 0x04, 0xb0, 0x33, 0x55, 0x9e, 0xca, 0x8e, 0x0b, -- 0x0f, 0x34, 0x6c, 0xfa, 0x35, 0xb9, 0x57, 0x41, 0x1e, 0x47, 0x3f, 0x84, 0x41, 0xe4, 0x78, 0xc5, -- 0xaa, 0x3d, 0xc8, 0x33, 0xab, 0x9d, 0x00, 0xed, 0xb0, 0xd8, 0x03, 0xb7, 0x86, 0x61, 0x3e, 0x2f, -- 0x97, 0x7b, 0x30, 0xef, 0x85, 0x16, 0x8c, 0xba, 0xe4, 0x81, 0x48, 0xde, 0x4b, 0x60, 0x67, 0x2c, -- 0x66, 0xa1, 0xcb, 0xbc, 0xf2, 0xfc, 0x19, 0xeb, 0xbb, 0xda, 0xde, 0x8a, 0xfd, 0x30, 0xcf, 0xac, -- 0xb7, 0x16, 0x92, 0x66, 0x87, 0x14, 0x16, 0xc7, 0x29, 0xaf, 0xc5, 0xc6, 0xa5, 0x23, 0xb0, 0x05, -- 0xd7, 0xe2, 0x6c, 0x7e, 0xc0, 0xce, 0x92, 0x43, 0xc6, 0xdd, 0x51, 0x51, 0xda, 0xaa, 0xf3, 0xab, -- 0x79, 0xa1, 0x05, 0xeb, 0xff, 0xae, 0x13, 0x1d, 0xfb, 0x11, 0xdb, 0x37, 0x62, 0x8e, 0x27, 0x3b, -- 0x15, 0x19, 0x55, 0x3d, 0x37, 0x75, 0x0f, 0x34, 0xec, 0x9a, 0x56, 0xd6, 0x0e, 0xbd, 0x45, 0x2b, -- 0xab, 0x46, 0xc3, 0xa6, 0x07, 0xe4, 0xae, 0xc7, 0xdc, 0x68, 0x3c, 0x89, 0x31, 0x7d, 0x65, 0xd7, -- 0x5d, 0x94, 0xdf, 0xcb, 0x33, 0x6b, 0xde, 0x09, 0xf3, 0x50, 0x33, 0x88, 0x1c, 0x43, 0xaf, 0x3d, -- 0x88, 0x1c, 0xc6, 0x3c, 0x44, 0x1f, 0x91, 0xcd, 0xe6, 0x38, 0x64, 0x61, 0xde, 0xce, 0x33, 0xab, -- 0xe9, 0x82, 0x26, 0x20, 0xe4, 0x78, 0x16, 0x1f, 0xa7, 0x93, 0xc0, 0x77, 0x1d, 0x21, 0xbf, 0x53, -- 0xca, 0x1b, 0x2e, 0x68, 0x02, 0x42, 0x3e, 0x69, 0x14, 0x60, 0x52, 0xca, 0x1b, 0x2e, 0x68, 0x02, -- 0x74, 0x42, 0x76, 0x8b, 0x85, 0x5d, 0x50, 0x22, 0x55, 0x41, 0x7f, 0x27, 0xcf, 0xac, 0xd7, 0x72, -- 0xe1, 0xb5, 0x0c, 0x7a, 0x49, 0xde, 0xae, 0xae, 0xe1, 0xa2, 0x4e, 0x65, 0x99, 0x7f, 0x37, 0xcf, -- 0xac, 0xdb, 0xd0, 0xe1, 0x36, 0xa4, 0xfe, 0xbf, 0x4b, 0x44, 0xc7, 0xc7, 0x94, 0xa8, 0x91, 0x4c, -- 0x5e, 0x8b, 0x87, 0x51, 0x1a, 0xd6, 0x2a, 0x74, 0x15, 0x87, 0x9a, 0x45, 0xbf, 0x24, 0x5b, 0x6c, -- 0x76, 0x99, 0x5e, 0xa4, 
0xa2, 0xd6, 0xcb, 0x4a, 0xa3, 0xdb, 0x3b, 0x79, 0x66, 0xcd, 0xf9, 0x60, -- 0x0e, 0xa1, 0x9f, 0x92, 0x75, 0x85, 0x61, 0xf1, 0x93, 0x0f, 0x1c, 0xdd, 0xbe, 0x9b, 0x67, 0x56, -- 0xdd, 0x01, 0x75, 0x53, 0x08, 0xf1, 0x45, 0x06, 0xcc, 0x65, 0xfe, 0xb3, 0xe2, 0x39, 0x83, 0xc2, -- 0x9a, 0x03, 0xea, 0xa6, 0x78, 0x98, 0x20, 0x80, 0x25, 0x5d, 0xa6, 0x17, 0x3e, 0x4c, 0x0a, 0x10, -- 0xca, 0xa6, 0x78, 0xef, 0xc4, 0x72, 0xac, 0x32, 0x97, 0x74, 0xf9, 0xde, 0x99, 0x61, 0x50, 0xb4, -- 0xc4, 0x02, 0x7a, 0xd5, 0x12, 0xd9, 0x2b, 0x2f, 0x99, 0x2a, 0x0e, 0x35, 0xcb, 0x3e, 0xbd, 0xba, -- 0x36, 0x3b, 0x2f, 0xaf, 0xcd, 0xce, 0xab, 0x6b, 0x53, 0xfb, 0x69, 0x6a, 0x6a, 0xbf, 0x4c, 0x4d, -- 0xed, 0xc5, 0xd4, 0xd4, 0xae, 0xa6, 0xa6, 0xf6, 0xd7, 0xd4, 0xd4, 0xfe, 0x9e, 0x9a, 0x9d, 0x57, -- 0x53, 0x53, 0xfb, 0xf9, 0xc6, 0xec, 0x5c, 0xdd, 0x98, 0x9d, 0x97, 0x37, 0x66, 0xe7, 0xfb, 0x0f, -- 0x86, 0x3e, 0x1f, 0xa5, 0xa7, 0xfb, 0x6e, 0x34, 0x1e, 0x0c, 0x63, 0xe7, 0xcc, 0x09, 0x9d, 0x41, -- 0x10, 0x9d, 0xfb, 0x83, 0xb6, 0xbf, 0x55, 0xa7, 0x5d, 0xfc, 0xd3, 0xf4, 0xf1, 0x7f, 0x01, 0x00, -- 0x00, 0xff, 0xff, 0x02, 0xba, 0xb6, 0x0a, 0x75, 0x0d, 0x00, 0x00, -+ // 1193 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0xcf, 0x6f, 0xdc, 0xc4, -+ 0x17, 0x5f, 0x67, 0xbf, 0xde, 0x4d, 0xa7, 0xf9, 0xd5, 0x49, 0xfa, 0xed, 0x16, 0x24, 0x3b, 0x2c, -+ 0x54, 0x04, 0x81, 0xb2, 0xe2, 0x87, 0x84, 0x40, 0x54, 0x42, 0x4e, 0x89, 0x14, 0xa9, 0x15, 0xe1, -+ 0x05, 0x2e, 0xdc, 0xbc, 0xf6, 0x64, 0xd7, 0x8a, 0xd7, 0xde, 0xf8, 0x47, 0x69, 0x4e, 0xf0, 0x27, -+ 0xf0, 0x67, 0x70, 0xe1, 0xc4, 0x09, 0x09, 0x71, 0xee, 0x31, 0xc7, 0x9e, 0x2c, 0xb2, 0xb9, 0x20, -+ 0x9f, 0x2a, 0x71, 0x47, 0x68, 0xde, 0xcc, 0xda, 0x1e, 0xaf, 0xb7, 0xcd, 0x65, 0x3d, 0xef, 0xf3, -+ 0x3e, 0x9f, 0x37, 0x3f, 0xdf, 0x9b, 0x59, 0xb2, 0x3b, 0x3d, 0x1b, 0x0d, 0xfc, 0x70, 0x74, 0xee, -+ 0x4f, 0x42, 0x97, 0xf9, 0x83, 0x38, 0xb1, 0x93, 0x58, 0xfc, 0xee, 0x4f, 0xa3, 0x30, 0x09, 0xa9, -+ 0x8e, 0xc6, 0x1b, 0x3b, 0xa3, 0x70, 0x14, 0x22, 0x32, 0xe0, 0x2d, 0xe1, 0xec, 0xff, 0xa3, 0x91, -+ 0x0e, 0xb0, 0x38, 0xf5, 0x13, 0xfa, 0x19, 0xe9, 0xc6, 0xe9, 0x64, 0x62, 0x47, 0x17, 0x3d, 0x6d, -+ 0x57, 0xdb, 0xbb, 0xfd, 0xd1, 0xc6, 0xbe, 0x08, 0x73, 0x22, 0x50, 0x6b, 0xf3, 0x79, 0x66, 0xb6, -+ 0xf2, 0xcc, 0x9c, 0xd3, 0x60, 0xde, 0xe0, 0xd2, 0xf3, 0x94, 0x45, 0x1e, 0x8b, 0x7a, 0x2b, 0x8a, -+ 0xf4, 0x1b, 0x81, 0x96, 0x52, 0x49, 0x83, 0x79, 0x83, 0x3e, 0x24, 0xab, 0x5e, 0x30, 0x62, 0x71, -+ 0xc2, 0xa2, 0x5e, 0x1b, 0xb5, 0x9b, 0x52, 0x7b, 0x24, 0x61, 0x6b, 0x4b, 0x8a, 0x0b, 0x22, 0x14, -+ 0x2d, 0xfa, 0x09, 0xe9, 0x38, 0xb6, 0x33, 0x66, 0x71, 0xef, 0x7f, 0x28, 0x5e, 0x97, 0xe2, 0x03, -+ 0x04, 0xad, 0x75, 0x29, 0xd5, 0x91, 0x04, 0x92, 0xdb, 0xff, 0xb3, 0x4d, 0x3a, 0x82, 0x41, 0x3f, -+ 0x24, 0xba, 0x33, 0x4e, 0x83, 0x33, 0x39, 0xe7, 0xb5, 0xaa, 0xbe, 0x22, 0xe7, 0x14, 0x10, 0x1f, -+ 0x2e, 0xf1, 0x02, 0x97, 0x3d, 0x93, 0x73, 0x5d, 0x22, 0x41, 0x0a, 0x88, 0x0f, 0x1f, 0x66, 0x84, -+ 0xab, 0x2c, 0xe7, 0xa8, 0x6a, 0x36, 0xa4, 0x46, 0x72, 0x40, 0x7e, 0xe9, 0x01, 0xb9, 0x8d, 0x34, -+ 0xb1, 0x41, 0x72, 0x86, 0xaa, 0x74, 0x5b, 0x4a, 0xab, 0x44, 0xa8, 0x1a, 0xf4, 0x90, 0xac, 0x3d, -+ 0x0d, 0xfd, 0x74, 0xc2, 0x64, 0x14, 0xbd, 0x21, 0xca, 0x8e, 0x8c, 0xa2, 0x30, 0x41, 0xb1, 0x78, -+ 0x9c, 0x98, 0x6f, 0xd9, 0x7c, 0x34, 0x9d, 0x57, 0xc5, 0xa9, 0x32, 0x41, 0xb1, 0xf8, 0xa4, 0x7c, -+ 0x7b, 0xc8, 0x7c, 0x19, 0xa6, 0xfb, 0xaa, 0x49, 0x55, 0x88, 0x50, 0x35, 0xfa, 0xbf, 0x77, 0x48, -+ 0x57, 0x1e, 0x4b, 0xfa, 0x1d, 0xb9, 0x37, 0xbc, 0x48, 0x58, 0x7c, 0x1c, 0x85, 0x0e, 0x8b, 
0x63, -+ 0xe6, 0x1e, 0xb3, 0xe8, 0x84, 0x39, 0x61, 0xe0, 0xe2, 0x9e, 0xb6, 0xad, 0x37, 0xf3, 0xcc, 0x5c, -+ 0x46, 0x81, 0x65, 0x0e, 0x1e, 0xd6, 0xf7, 0x82, 0xc6, 0xb0, 0x2b, 0x65, 0xd8, 0x25, 0x14, 0x58, -+ 0xe6, 0xa0, 0x47, 0x64, 0x3b, 0x09, 0x13, 0xdb, 0xb7, 0x94, 0x6e, 0xf1, 0x58, 0xb4, 0xad, 0x7b, -+ 0x79, 0x66, 0x36, 0xb9, 0xa1, 0x09, 0x2c, 0x42, 0x3d, 0x56, 0xba, 0xc2, 0x63, 0x52, 0x0d, 0xa5, -+ 0xba, 0xa1, 0x09, 0xa4, 0x7b, 0x64, 0x95, 0x3d, 0x63, 0xce, 0xb7, 0xde, 0x84, 0xe1, 0x01, 0xd1, -+ 0xac, 0x35, 0x9e, 0x70, 0x73, 0x0c, 0x8a, 0x16, 0x7d, 0x9f, 0xdc, 0x3a, 0x4f, 0x59, 0xca, 0x90, -+ 0xda, 0x41, 0xea, 0x7a, 0x9e, 0x99, 0x25, 0x08, 0x65, 0x93, 0xee, 0x13, 0x12, 0xa7, 0x43, 0x91, -+ 0xea, 0x31, 0x6e, 0x75, 0xdb, 0xda, 0xc8, 0x33, 0xb3, 0x82, 0x42, 0xa5, 0x4d, 0x1f, 0x93, 0x1d, -+ 0x1c, 0xdd, 0x57, 0x41, 0x22, 0x4e, 0x4c, 0x92, 0x46, 0x01, 0x73, 0x7b, 0xab, 0xa8, 0xec, 0xe5, -+ 0x99, 0xd9, 0xe8, 0x87, 0x46, 0x94, 0xf6, 0x49, 0x27, 0x9e, 0xfa, 0x5e, 0x12, 0xf7, 0x6e, 0xa1, -+ 0x9e, 0xf0, 0x14, 0x13, 0x08, 0xc8, 0x2f, 0x72, 0xc6, 0x76, 0xe4, 0xc6, 0x3d, 0x52, 0xe1, 0x20, -+ 0x02, 0xf2, 0x5b, 0x8c, 0xea, 0x38, 0x8c, 0x93, 0x43, 0xcf, 0x4f, 0x58, 0x84, 0xab, 0xd7, 0xbb, -+ 0x5d, 0x1b, 0x55, 0xcd, 0x0f, 0x8d, 0x28, 0xfd, 0x91, 0x3c, 0x40, 0xfc, 0x24, 0x89, 0x52, 0x27, -+ 0x49, 0x23, 0xe6, 0x3e, 0x61, 0x89, 0xed, 0xda, 0x89, 0x5d, 0x3b, 0x12, 0x6b, 0x18, 0xfe, 0xbd, -+ 0x3c, 0x33, 0x6f, 0x26, 0x80, 0x9b, 0xd1, 0xfa, 0x5f, 0x90, 0xae, 0x2c, 0xcb, 0xbc, 0x92, 0xc5, -+ 0x49, 0x18, 0xb1, 0x5a, 0xf1, 0x3b, 0xe1, 0x58, 0x59, 0xc9, 0x90, 0x02, 0xe2, 0xd3, 0xff, 0x75, -+ 0x85, 0xac, 0x1e, 0x95, 0xd5, 0x77, 0x0d, 0xfb, 0x04, 0xc6, 0xf3, 0x56, 0xe4, 0x9b, 0x6e, 0x6d, -+ 0xf1, 0x0a, 0x50, 0xc5, 0x41, 0xb1, 0xe8, 0x21, 0xa1, 0x68, 0x1f, 0xf0, 0x6a, 0x1a, 0x3f, 0xb1, -+ 0x13, 0xd4, 0x8a, 0xa4, 0xfa, 0x7f, 0x9e, 0x99, 0x0d, 0x5e, 0x68, 0xc0, 0x8a, 0xde, 0x2d, 0xb4, -+ 0x63, 0x99, 0x43, 0x65, 0xef, 0x12, 0x07, 0xc5, 0xa2, 0x9f, 0x93, 0x8d, 0x32, 0x03, 0x4e, 0x58, -+ 0x90, 0xc8, 0x84, 0xa1, 0x79, 0x66, 0xd6, 0x3c, 0x50, 0xb3, 0xcb, 0xf5, 0xd2, 0x6f, 0xbc, 0x5e, -+ 0x7f, 0xb4, 0x89, 0x8e, 0xfe, 0xa2, 0x63, 0x31, 0x09, 0x60, 0xa7, 0xb2, 0x3c, 0x95, 0x1d, 0x17, -+ 0x1e, 0xa8, 0xd9, 0xf4, 0x6b, 0x72, 0xb7, 0x82, 0x3c, 0x0a, 0x7f, 0x08, 0xfc, 0xd0, 0x76, 0x8b, -+ 0x55, 0xbb, 0x9f, 0x67, 0x66, 0x33, 0x01, 0x9a, 0x61, 0xbe, 0x07, 0x8e, 0x82, 0x61, 0x3e, 0xb7, -+ 0xcb, 0x3d, 0x58, 0xf4, 0x42, 0x03, 0x46, 0x1d, 0x72, 0x9f, 0x27, 0xef, 0x05, 0xb0, 0x53, 0x16, -+ 0xb1, 0xc0, 0x61, 0x6e, 0x79, 0xfe, 0x7a, 0xeb, 0xbb, 0xda, 0xde, 0xaa, 0xf5, 0x20, 0xcf, 0xcc, -+ 0xb7, 0x96, 0x92, 0xe6, 0x87, 0x14, 0x96, 0xc7, 0x29, 0xef, 0xe8, 0xda, 0x0d, 0xc8, 0xb1, 0x25, -+ 0x77, 0xf4, 0x7c, 0x7e, 0xc0, 0x4e, 0xe3, 0x43, 0x96, 0x38, 0xe3, 0xa2, 0xb4, 0x55, 0xe7, 0xa7, -+ 0x78, 0xa1, 0x01, 0xeb, 0xff, 0xa6, 0x13, 0x1d, 0xfb, 0xe1, 0xdb, 0x37, 0x66, 0xb6, 0x2b, 0x3a, -+ 0xe5, 0x19, 0x55, 0x3d, 0x37, 0xaa, 0x07, 0x6a, 0xb6, 0xa2, 0x15, 0xb5, 0x43, 0x6f, 0xd0, 0x8a, -+ 0xaa, 0x51, 0xb3, 0xe9, 0x01, 0xb9, 0xe3, 0x32, 0x27, 0x9c, 0x4c, 0x23, 0x4c, 0x5f, 0xd1, 0x75, -+ 0x07, 0xe5, 0x77, 0xf3, 0xcc, 0x5c, 0x74, 0xc2, 0x22, 0x54, 0x0f, 0x22, 0xc6, 0xd0, 0x6d, 0x0e, -+ 0x22, 0x86, 0xb1, 0x08, 0xd1, 0x87, 0x64, 0xb3, 0x3e, 0x0e, 0x51, 0x98, 0xb7, 0xf3, 0xcc, 0xac, -+ 0xbb, 0xa0, 0x0e, 0x70, 0x39, 0x9e, 0xc5, 0x47, 0xe9, 0xd4, 0xf7, 0x1c, 0x9b, 0xcb, 0x6f, 0x95, -+ 0xf2, 0x9a, 0x0b, 0xea, 0x00, 0x97, 0x4f, 0x6b, 0x05, 0x98, 0x94, 0xf2, 0x9a, 0x0b, 0xea, 0x00, -+ 0x9d, 0x92, 0xdd, 0x62, 0x61, 0x97, 0x94, 0x48, 0x59, 0xd0, 0xdf, 0xc9, 0x33, 
0xf3, 0xb5, 0x5c, -+ 0x78, 0x2d, 0x83, 0x5e, 0x90, 0xb7, 0xab, 0x6b, 0xb8, 0xac, 0x53, 0x51, 0xe6, 0xdf, 0xcd, 0x33, -+ 0xf3, 0x26, 0x74, 0xb8, 0x09, 0xa9, 0xff, 0xef, 0x0a, 0xd1, 0xf1, 0x29, 0xc5, 0x6b, 0x24, 0x13, -+ 0xd7, 0xe2, 0x61, 0x98, 0x06, 0x4a, 0x85, 0xae, 0xe2, 0xa0, 0x58, 0xf4, 0x4b, 0xb2, 0xc5, 0xe6, -+ 0x97, 0xe9, 0x79, 0xca, 0x6b, 0xbd, 0xa8, 0x34, 0xba, 0xb5, 0x93, 0x67, 0xe6, 0x82, 0x0f, 0x16, -+ 0x10, 0xfa, 0x29, 0x59, 0x97, 0x18, 0x16, 0x3f, 0xf1, 0xc0, 0xd1, 0xad, 0x3b, 0x79, 0x66, 0xaa, -+ 0x0e, 0x50, 0x4d, 0x2e, 0xc4, 0x17, 0x19, 0x30, 0x87, 0x79, 0x4f, 0x8b, 0xe7, 0x0c, 0x0a, 0x15, -+ 0x07, 0xa8, 0x26, 0x7f, 0x98, 0x20, 0x80, 0x25, 0x5d, 0xa4, 0x17, 0x3e, 0x4c, 0x0a, 0x10, 0xca, -+ 0x26, 0x7f, 0xef, 0x44, 0x62, 0xac, 0x22, 0x97, 0x74, 0xf1, 0xde, 0x99, 0x63, 0x50, 0xb4, 0xf8, -+ 0x02, 0xba, 0xd5, 0x12, 0xd9, 0x2d, 0x2f, 0x99, 0x2a, 0x0e, 0x8a, 0x65, 0x0d, 0x2f, 0xaf, 0x8c, -+ 0xd6, 0x8b, 0x2b, 0xa3, 0xf5, 0xf2, 0xca, 0xd0, 0x7e, 0x9a, 0x19, 0xda, 0x2f, 0x33, 0x43, 0x7b, -+ 0x3e, 0x33, 0xb4, 0xcb, 0x99, 0xa1, 0xfd, 0x35, 0x33, 0xb4, 0xbf, 0x67, 0x46, 0xeb, 0xe5, 0xcc, -+ 0xd0, 0x7e, 0xbe, 0x36, 0x5a, 0x97, 0xd7, 0x46, 0xeb, 0xc5, 0xb5, 0xd1, 0xfa, 0xfe, 0x83, 0x91, -+ 0x97, 0x8c, 0xd3, 0xe1, 0xbe, 0x13, 0x4e, 0x06, 0xa3, 0xc8, 0x3e, 0xb5, 0x03, 0x7b, 0xe0, 0x87, -+ 0x67, 0xde, 0xa0, 0xe9, 0x3f, 0xde, 0xb0, 0x83, 0xff, 0xe0, 0x3e, 0xfe, 0x2f, 0x00, 0x00, 0xff, -+ 0xff, 0x99, 0x60, 0xf2, 0x04, 0x02, 0x0e, 0x00, 0x00, - } - - func (this *Result) Equal(that interface{}) bool { -@@ -892,6 +910,12 @@ func (this *Caches) Equal(that interface{}) bool { - if !this.VolumeResult.Equal(&that1.VolumeResult) { - return false - } -+ if !this.SeriesResult.Equal(&that1.SeriesResult) { -+ return false -+ } -+ if !this.LabelResult.Equal(&that1.LabelResult) { -+ return false -+ } - return true - } - func (this *Summary) Equal(that interface{}) bool { -@@ -1157,13 +1181,15 @@ func (this *Caches) GoString() string { - if this == nil { - return ""nil"" - } -- s := make([]string, 0, 9) -+ s := make([]string, 0, 11) - s = append(s, ""&stats.Caches{"") - s = append(s, ""Chunk: ""+strings.Replace(this.Chunk.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""Index: ""+strings.Replace(this.Index.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""Result: ""+strings.Replace(this.Result.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""StatsResult: ""+strings.Replace(this.StatsResult.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""VolumeResult: ""+strings.Replace(this.VolumeResult.GoString(), `&`, ``, 1)+"",\n"") -+ s = append(s, ""SeriesResult: ""+strings.Replace(this.SeriesResult.GoString(), `&`, ``, 1)+"",\n"") -+ s = append(s, ""LabelResult: ""+strings.Replace(this.LabelResult.GoString(), `&`, ``, 1)+"",\n"") - s = append(s, ""}"") - return strings.Join(s, """") - } -@@ -1352,6 +1378,26 @@ func (m *Caches) MarshalToSizedBuffer(dAtA []byte) (int, error) { - _ = i - var l int - _ = l -+ { -+ size, err := m.LabelResult.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintStats(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x3a -+ { -+ size, err := m.SeriesResult.MarshalToSizedBuffer(dAtA[:i]) -+ if err != nil { -+ return 0, err -+ } -+ i -= size -+ i = encodeVarintStats(dAtA, i, uint64(size)) -+ } -+ i-- -+ dAtA[i] = 0x32 - { - size, err := m.VolumeResult.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { -@@ -1809,6 +1855,10 @@ func (m *Caches) Size() (n int) { - n += 1 + l + sovStats(uint64(l)) - l = m.VolumeResult.Size() - n += 1 + l + 
sovStats(uint64(l)) -+ l = m.SeriesResult.Size() -+ n += 1 + l + sovStats(uint64(l)) -+ l = m.LabelResult.Size() -+ n += 1 + l + sovStats(uint64(l)) - return n - } - -@@ -2012,6 +2062,8 @@ func (this *Caches) String() string { - `Result:` + strings.Replace(strings.Replace(this.Result.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, - `StatsResult:` + strings.Replace(strings.Replace(this.StatsResult.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, - `VolumeResult:` + strings.Replace(strings.Replace(this.VolumeResult.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, -+ `SeriesResult:` + strings.Replace(strings.Replace(this.SeriesResult.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, -+ `LabelResult:` + strings.Replace(strings.Replace(this.LabelResult.String(), ""Cache"", ""Cache"", 1), `&`, ``, 1) + `,`, - `}`, - }, """") - return s -@@ -2497,6 +2549,72 @@ func (m *Caches) Unmarshal(dAtA []byte) error { - return err - } - iNdEx = postIndex -+ case 6: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field SeriesResult"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowStats -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthStats -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthStats -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ if err := m.SeriesResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex -+ case 7: -+ if wireType != 2 { -+ return fmt.Errorf(""proto: wrong wireType = %d for field LabelResult"", wireType) -+ } -+ var msglen int -+ for shift := uint(0); ; shift += 7 { -+ if shift >= 64 { -+ return ErrIntOverflowStats -+ } -+ if iNdEx >= l { -+ return io.ErrUnexpectedEOF -+ } -+ b := dAtA[iNdEx] -+ iNdEx++ -+ msglen |= int(b&0x7F) << shift -+ if b < 0x80 { -+ break -+ } -+ } -+ if msglen < 0 { -+ return ErrInvalidLengthStats -+ } -+ postIndex := iNdEx + msglen -+ if postIndex < 0 { -+ return ErrInvalidLengthStats -+ } -+ if postIndex > l { -+ return io.ErrUnexpectedEOF -+ } -+ if err := m.LabelResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err -+ } -+ iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) -diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto -index 3aeaab7d0c9ad..0be2f2334a00b 100644 ---- a/pkg/logqlmodel/stats/stats.proto -+++ b/pkg/logqlmodel/stats/stats.proto -@@ -49,6 +49,14 @@ message Caches { - (gogoproto.nullable) = false, - (gogoproto.jsontag) = ""volumeResult"" - ]; -+ Cache seriesResult = 6 [ -+ (gogoproto.nullable) = false, -+ (gogoproto.jsontag) = ""seriesResult"" -+ ]; -+ Cache labelResult = 7 [ -+ (gogoproto.nullable) = false, -+ (gogoproto.jsontag) = ""labelResult"" -+ ]; - } - - // Summary is the summary of a query statistics. -diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go -index 41a87775a9ecc..9817c04afdc5e 100644 ---- a/pkg/loki/config_wrapper.go -+++ b/pkg/loki/config_wrapper.go -@@ -619,14 +619,32 @@ func applyEmbeddedCacheConfig(r *ConfigWrapper) { - - indexStatsCacheConfig := r.QueryRange.StatsCacheConfig.CacheConfig - if !cache.IsCacheConfigured(indexStatsCacheConfig) { -+ prefix := indexStatsCacheConfig.Prefix - // We use the same config as the query range results cache. 
- r.QueryRange.StatsCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig -+ r.QueryRange.StatsCacheConfig.CacheConfig.Prefix = prefix - } - - volumeCacheConfig := r.QueryRange.VolumeCacheConfig.CacheConfig - if !cache.IsCacheConfigured(volumeCacheConfig) { -+ prefix := volumeCacheConfig.Prefix - // We use the same config as the query range results cache. - r.QueryRange.VolumeCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig -+ r.QueryRange.VolumeCacheConfig.CacheConfig.Prefix = prefix -+ } -+ -+ seriesCacheConfig := r.QueryRange.SeriesCacheConfig.CacheConfig -+ if !cache.IsCacheConfigured(seriesCacheConfig) { -+ prefix := seriesCacheConfig.Prefix -+ r.QueryRange.SeriesCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig -+ r.QueryRange.SeriesCacheConfig.CacheConfig.Prefix = prefix -+ } -+ -+ labelsCacheConfig := r.QueryRange.LabelsCacheConfig.CacheConfig -+ if !cache.IsCacheConfigured(labelsCacheConfig) { -+ prefix := labelsCacheConfig.Prefix -+ r.QueryRange.LabelsCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig -+ r.QueryRange.LabelsCacheConfig.CacheConfig.Prefix = prefix - } - } - -diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go -index 7b3e8c9fff78c..866079b71f60f 100644 ---- a/pkg/loki/config_wrapper_test.go -+++ b/pkg/loki/config_wrapper_test.go -@@ -937,6 +937,7 @@ query_range: - - config, _, _ := configWrapperFromYAML(t, configFileString, nil) - assert.EqualValues(t, config.QueryRange.StatsCacheConfig.CacheConfig.Redis.Endpoint, ""endpoint.redis.org"") -+ assert.EqualValues(t, ""frontend.index-stats-results-cache."", config.QueryRange.StatsCacheConfig.CacheConfig.Prefix) - assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - -@@ -950,17 +951,20 @@ query_range: - - config, _, _ := configWrapperFromYAML(t, configFileString, nil) - assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.StatsCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.index-stats-results-cache."", config.QueryRange.StatsCacheConfig.CacheConfig.Prefix) - assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - - t.Run(""embedded cache is enabled by default if no other cache is set"", func(t *testing.T) { - config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) -+ assert.EqualValues(t, ""frontend.index-stats-results-cache."", config.QueryRange.StatsCacheConfig.CacheConfig.Prefix) - assert.True(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - - t.Run(""gets results cache config if not configured directly"", func(t *testing.T) { - config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil) - assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.StatsCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.index-stats-results-cache."", config.QueryRange.StatsCacheConfig.CacheConfig.Prefix) - assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - }) -@@ -976,6 +980,7 @@ query_range: - - config, _, _ := configWrapperFromYAML(t, configFileString, nil) - assert.EqualValues(t, config.QueryRange.VolumeCacheConfig.CacheConfig.Redis.Endpoint, ""endpoint.redis.org"") -+ assert.EqualValues(t, ""frontend.volume-results-cache."", config.QueryRange.VolumeCacheConfig.CacheConfig.Prefix) - assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - -@@ -989,20 
+994,109 @@ query_range: - - config, _, _ := configWrapperFromYAML(t, configFileString, nil) - assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.VolumeCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.volume-results-cache."", config.QueryRange.VolumeCacheConfig.CacheConfig.Prefix) - assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - - t.Run(""embedded cache is enabled by default if no other cache is set"", func(t *testing.T) { - config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) -+ assert.EqualValues(t, ""frontend.volume-results-cache."", config.QueryRange.VolumeCacheConfig.CacheConfig.Prefix) - assert.True(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - - t.Run(""gets results cache config if not configured directly"", func(t *testing.T) { - config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil) - assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.VolumeCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.volume-results-cache."", config.QueryRange.VolumeCacheConfig.CacheConfig.Prefix) - assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) - }) - }) -+ -+ t.Run(""for the series results cache config"", func(t *testing.T) { -+ t.Run(""no embedded cache enabled by default if Redis is set"", func(t *testing.T) { -+ configFileString := `--- -+query_range: -+ series_results_cache: -+ cache: -+ redis: -+ endpoint: endpoint.redis.org` -+ -+ config, _, _ := configWrapperFromYAML(t, configFileString, nil) -+ assert.EqualValues(t, ""endpoint.redis.org"", config.QueryRange.SeriesCacheConfig.CacheConfig.Redis.Endpoint) -+ assert.EqualValues(t, ""frontend.series-results-cache."", config.QueryRange.SeriesCacheConfig.CacheConfig.Prefix) -+ assert.False(t, config.QueryRange.SeriesCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ }) -+ -+ t.Run(""no embedded cache enabled by default if Memcache is set"", func(t *testing.T) { -+ configFileString := `--- -+query_range: -+ series_results_cache: -+ cache: -+ memcached_client: -+ host: memcached.host.org` -+ -+ config, _, _ := configWrapperFromYAML(t, configFileString, nil) -+ assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.SeriesCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.series-results-cache."", config.QueryRange.SeriesCacheConfig.CacheConfig.Prefix) -+ assert.False(t, config.QueryRange.SeriesCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ }) -+ -+ t.Run(""embedded cache is enabled by default if no other cache is set"", func(t *testing.T) { -+ config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) -+ assert.True(t, config.QueryRange.SeriesCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ assert.EqualValues(t, ""frontend.series-results-cache."", config.QueryRange.SeriesCacheConfig.CacheConfig.Prefix) -+ }) -+ -+ t.Run(""gets results cache config if not configured directly"", func(t *testing.T) { -+ config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil) -+ assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.SeriesCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.series-results-cache."", config.QueryRange.SeriesCacheConfig.CacheConfig.Prefix) -+ assert.False(t, config.QueryRange.SeriesCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ }) -+ }) -+ -+ t.Run(""for the labels results cache config"", func(t *testing.T) { -+ 
t.Run(""no embedded cache enabled by default if Redis is set"", func(t *testing.T) { -+ configFileString := `--- -+query_range: -+ label_results_cache: -+ cache: -+ redis: -+ endpoint: endpoint.redis.org` -+ -+ config, _, _ := configWrapperFromYAML(t, configFileString, nil) -+ assert.EqualValues(t, ""endpoint.redis.org"", config.QueryRange.LabelsCacheConfig.CacheConfig.Redis.Endpoint) -+ assert.EqualValues(t, ""frontend.label-results-cache."", config.QueryRange.LabelsCacheConfig.CacheConfig.Prefix) -+ assert.False(t, config.QueryRange.LabelsCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ }) -+ -+ t.Run(""no embedded cache enabled by default if Memcache is set"", func(t *testing.T) { -+ configFileString := `--- -+query_range: -+ label_results_cache: -+ cache: -+ memcached_client: -+ host: memcached.host.org` -+ -+ config, _, _ := configWrapperFromYAML(t, configFileString, nil) -+ assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.LabelsCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.label-results-cache."", config.QueryRange.LabelsCacheConfig.CacheConfig.Prefix) -+ assert.False(t, config.QueryRange.LabelsCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ }) -+ -+ t.Run(""embedded cache is enabled by default if no other cache is set"", func(t *testing.T) { -+ config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) -+ assert.True(t, config.QueryRange.LabelsCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ assert.EqualValues(t, ""frontend.label-results-cache."", config.QueryRange.LabelsCacheConfig.CacheConfig.Prefix) -+ }) -+ -+ t.Run(""gets results cache config if not configured directly"", func(t *testing.T) { -+ config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil) -+ assert.EqualValues(t, ""memcached.host.org"", config.QueryRange.LabelsCacheConfig.CacheConfig.MemcacheClient.Host) -+ assert.EqualValues(t, ""frontend.label-results-cache."", config.QueryRange.LabelsCacheConfig.CacheConfig.Prefix) -+ assert.False(t, config.QueryRange.LabelsCacheConfig.CacheConfig.EmbeddedCache.Enabled) -+ }) -+ }) - } - - func TestDefaultUnmarshal(t *testing.T) { -diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go -index d5a324779bf21..205cae586e099 100644 ---- a/pkg/querier/queryrange/codec.go -+++ b/pkg/querier/queryrange/codec.go -@@ -231,10 +231,8 @@ func (r *LabelRequest) GetStep() int64 { - - func (r *LabelRequest) WithStartEnd(s, e time.Time) queryrangebase.Request { - clone := *r -- tmp := s -- clone.Start = &tmp -- tmp = e -- clone.End = &tmp -+ clone.Start = &s -+ clone.End = &e - return &clone - } - -@@ -863,15 +861,17 @@ func decodeResponseJSONFrom(buf []byte, req queryrangebase.Request, headers http - - switch req := req.(type) { - case *LokiSeriesRequest: -- resp := &LokiSeriesResponse{ -- Version: uint32(loghttp.GetVersion(req.Path)), -- Headers: httpResponseHeadersToPromResponseHeaders(headers), -- } -+ var resp LokiSeriesResponse - if err := json.Unmarshal(buf, &resp); err != nil { - return nil, httpgrpc.Errorf(http.StatusInternalServerError, ""error decoding response: %v"", err) - } - -- return resp, nil -+ return &LokiSeriesResponse{ -+ Status: resp.Status, -+ Version: uint32(loghttp.GetVersion(req.Path)), -+ Headers: httpResponseHeadersToPromResponseHeaders(headers), -+ Data: resp.Data, -+ }, nil - case *LabelRequest: - var resp loghttp.LabelResponse - if err := json.Unmarshal(buf, &resp); err != nil { -@@ -1206,6 +1206,7 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) 
(queryrangebase - Status: lokiSeriesRes.Status, - Version: lokiSeriesRes.Version, - Data: lokiSeriesData, -+ Headers: lokiSeriesRes.Headers, - Statistics: mergedStats, - }, nil - case *LokiSeriesResponseView: -@@ -1240,6 +1241,7 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) (queryrangebase - return &LokiLabelNamesResponse{ - Status: labelNameRes.Status, - Version: labelNameRes.Version, -+ Headers: labelNameRes.Headers, - Data: names, - Statistics: mergedStats, - }, nil -diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go -index 215590f267766..936a702662c41 100644 ---- a/pkg/querier/queryrange/codec_test.go -+++ b/pkg/querier/queryrange/codec_test.go -@@ -1612,6 +1612,24 @@ var ( - ""requests"": 0, - ""downloadTime"": 0 - }, -+ ""seriesResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, -+ ""labelResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, - ""volumeResult"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -@@ -2006,6 +2024,8 @@ var ( - Index: stats.Cache{}, - StatsResult: stats.Cache{}, - VolumeResult: stats.Cache{}, -+ SeriesResult: stats.Cache{}, -+ LabelResult: stats.Cache{}, - Result: stats.Cache{}, - }, - } -diff --git a/pkg/querier/queryrange/labels_cache.go b/pkg/querier/queryrange/labels_cache.go -new file mode 100644 -index 0000000000000..1e0dd225fa7b0 ---- /dev/null -+++ b/pkg/querier/queryrange/labels_cache.go -@@ -0,0 +1,99 @@ -+package queryrange -+ -+import ( -+ ""context"" -+ ""flag"" -+ ""fmt"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ -+ ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"" -+ ""github.com/grafana/loki/pkg/storage/chunk/cache"" -+ ""github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"" -+) -+ -+type cacheKeyLabels struct { -+ Limits -+ transformer UserIDTransformer -+} -+ -+// GenerateCacheKey generates a cache key based on the userID, split duration and the interval of the request. -+// It also includes the label name and the provided query for label values request. -+func (i cacheKeyLabels) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string { -+ lr := r.(*LabelRequest) -+ split := i.MetadataQuerySplitDuration(userID) -+ -+ var currentInterval int64 -+ if denominator := int64(split / time.Millisecond); denominator > 0 { -+ currentInterval = lr.GetStart().UnixMilli() / denominator -+ } -+ -+ if i.transformer != nil { -+ userID = i.transformer(ctx, userID) -+ } -+ -+ if lr.GetValues() { -+ return fmt.Sprintf(""labelvalues:%s:%s:%s:%d:%d"", userID, lr.GetName(), lr.GetQuery(), currentInterval, split) -+ } -+ -+ return fmt.Sprintf(""labels:%s:%d:%d"", userID, currentInterval, split) -+} -+ -+type labelsExtractor struct{} -+ -+// Extract extracts the labels response for the specific time range. -+// It is a no-op since it is not possible to partition the labels data by time range as it is just a slice of strings. 
-+func (p labelsExtractor) Extract(_, _ int64, res resultscache.Response, _, _ int64) resultscache.Response { -+ return res -+} -+ -+func (p labelsExtractor) ResponseWithoutHeaders(resp queryrangebase.Response) queryrangebase.Response { -+ labelsResp := resp.(*LokiLabelNamesResponse) -+ return &LokiLabelNamesResponse{ -+ Status: labelsResp.Status, -+ Data: labelsResp.Data, -+ Version: labelsResp.Version, -+ Statistics: labelsResp.Statistics, -+ } -+} -+ -+type LabelsCacheConfig struct { -+ queryrangebase.ResultsCacheConfig `yaml:"",inline""` -+} -+ -+// RegisterFlags registers flags. -+func (cfg *LabelsCacheConfig) RegisterFlags(f *flag.FlagSet) { -+ cfg.RegisterFlagsWithPrefix(f, ""frontend.label-results-cache."") -+} -+ -+func (cfg *LabelsCacheConfig) Validate() error { -+ return cfg.ResultsCacheConfig.Validate() -+} -+ -+func NewLabelsCacheMiddleware( -+ logger log.Logger, -+ limits Limits, -+ merger queryrangebase.Merger, -+ c cache.Cache, -+ cacheGenNumberLoader queryrangebase.CacheGenNumberLoader, -+ shouldCache queryrangebase.ShouldCacheFn, -+ parallelismForReq queryrangebase.ParallelismForReqFn, -+ retentionEnabled bool, -+ transformer UserIDTransformer, -+ metrics *queryrangebase.ResultsCacheMetrics, -+) (queryrangebase.Middleware, error) { -+ return queryrangebase.NewResultsCacheMiddleware( -+ logger, -+ c, -+ cacheKeyLabels{limits, transformer}, -+ limits, -+ merger, -+ labelsExtractor{}, -+ cacheGenNumberLoader, -+ shouldCache, -+ parallelismForReq, -+ retentionEnabled, -+ metrics, -+ ) -+} -diff --git a/pkg/querier/queryrange/labels_cache_test.go b/pkg/querier/queryrange/labels_cache_test.go -new file mode 100644 -index 0000000000000..73ab9ad8f4f84 ---- /dev/null -+++ b/pkg/querier/queryrange/labels_cache_test.go -@@ -0,0 +1,251 @@ -+package queryrange -+ -+import ( -+ ""context"" -+ ""fmt"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/grafana/dskit/user"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/loghttp"" -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logqlmodel/stats"" -+ ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"" -+ ""github.com/grafana/loki/pkg/storage/chunk/cache"" -+ ""github.com/grafana/loki/pkg/util"" -+) -+ -+func TestCacheKeyLabels_GenerateCacheKey(t *testing.T) { -+ k := cacheKeyLabels{ -+ transformer: nil, -+ Limits: fakeLimits{ -+ metadataSplitDuration: map[string]time.Duration{ -+ ""fake"": time.Hour, -+ }, -+ }, -+ } -+ -+ from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) -+ start := from.Time() -+ end := through.Time() -+ -+ req := LabelRequest{ -+ LabelRequest: logproto.LabelRequest{ -+ Start: &start, -+ End: &end, -+ }, -+ } -+ -+ expectedInterval := testTime.UnixMilli() / time.Hour.Milliseconds() -+ -+ t.Run(""labels"", func(t *testing.T) { -+ require.Equal(t, fmt.Sprintf(`labels:fake:%d:%d`, expectedInterval, time.Hour.Nanoseconds()), k.GenerateCacheKey(context.Background(), ""fake"", &req)) -+ }) -+ -+ t.Run(""label values"", func(t *testing.T) { -+ req := req -+ req.Name = ""foo"" -+ req.Values = true -+ require.Equal(t, fmt.Sprintf(`labelvalues:fake:foo::%d:%d`, expectedInterval, time.Hour.Nanoseconds()), k.GenerateCacheKey(context.Background(), ""fake"", &req)) -+ -+ req.Query = `{cluster=""eu-west1""}` -+ require.Equal(t, fmt.Sprintf(`labelvalues:fake:foo:{cluster=""eu-west1""}:%d:%d`, expectedInterval, time.Hour.Nanoseconds()), k.GenerateCacheKey(context.Background(), ""fake"", &req)) -+ }) -+} -+ -+func 
TestLabelsCache(t *testing.T) { -+ setupCacheMW := func() queryrangebase.Middleware { -+ cacheMiddleware, err := NewLabelsCacheMiddleware( -+ log.NewNopLogger(), -+ fakeLimits{ -+ metadataSplitDuration: map[string]time.Duration{ -+ ""fake"": 24 * time.Hour, -+ }, -+ }, -+ DefaultCodec, -+ cache.NewMockCache(), -+ nil, -+ nil, -+ func(_ context.Context, _ []string, _ queryrangebase.Request) int { -+ return 1 -+ }, -+ false, -+ nil, -+ nil, -+ ) -+ require.NoError(t, err) -+ return cacheMiddleware -+ } -+ -+ cacheMiddleware := setupCacheMW() -+ for _, values := range []bool{false, true} { -+ prefix := ""labels"" -+ if values { -+ prefix = ""label values"" -+ } -+ t.Run(prefix+"": cache the response for the same request"", func(t *testing.T) { -+ start := testTime.Truncate(time.Millisecond) -+ end := start.Add(time.Hour) -+ -+ labelsReq := LabelRequest{ -+ LabelRequest: logproto.LabelRequest{ -+ Start: &start, -+ End: &end, -+ }, -+ } -+ -+ if values { -+ labelsReq.Values = true -+ labelsReq.Name = ""foo"" -+ labelsReq.Query = `{cluster=""eu-west1""}` -+ } -+ -+ labelsResp := &LokiLabelNamesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []string{""bar"", ""buzz""}, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 1, -+ }, -+ }, -+ } -+ -+ called := 0 -+ handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { -+ called++ -+ -+ // should request the entire length with no partitioning as nothing is cached yet. -+ require.Equal(t, labelsReq.GetStart(), r.GetStart()) -+ require.Equal(t, labelsReq.GetEnd(), r.GetEnd()) -+ -+ got := r.(*LabelRequest) -+ require.Equal(t, labelsReq.GetName(), got.GetName()) -+ require.Equal(t, labelsReq.GetValues(), got.GetValues()) -+ require.Equal(t, labelsReq.GetQuery(), got.GetQuery()) -+ -+ return labelsResp, nil -+ })) -+ -+ ctx := user.InjectOrgID(context.Background(), ""fake"") -+ got, err := handler.Do(ctx, &labelsReq) -+ require.NoError(t, err) -+ require.Equal(t, 1, called) // called actual handler, as not cached. -+ require.Equal(t, labelsResp, got) -+ -+ // Doing same request again shouldn't change anything. 
-+ called = 0 -+ got, err = handler.Do(ctx, &labelsReq) -+ require.NoError(t, err) -+ require.Equal(t, 0, called) -+ require.Equal(t, labelsResp, got) -+ }) -+ } -+ -+ // reset cacheMiddleware -+ cacheMiddleware = setupCacheMW() -+ for _, values := range []bool{false, true} { -+ prefix := ""labels"" -+ if values { -+ prefix = ""label values"" -+ } -+ t.Run(prefix+"": a new request with overlapping time range should reuse part of the previous request for the overlap"", func(t *testing.T) { -+ cacheMiddleware := setupCacheMW() -+ -+ start := testTime.Truncate(time.Millisecond) -+ end := start.Add(time.Hour) -+ -+ labelsReq1 := LabelRequest{ -+ LabelRequest: logproto.LabelRequest{ -+ Start: &start, -+ End: &end, -+ }, -+ } -+ -+ if values { -+ labelsReq1.Values = true -+ labelsReq1.Name = ""foo"" -+ labelsReq1.Query = `{cluster=""eu-west1""}` -+ } -+ -+ labelsResp1 := &LokiLabelNamesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []string{""bar"", ""buzz""}, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 1, -+ }, -+ }, -+ } -+ -+ called := 0 -+ handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { -+ called++ -+ -+ // should request the entire length with no partitioning as nothing is cached yet. -+ require.Equal(t, labelsReq1.GetStart(), r.GetStart()) -+ require.Equal(t, labelsReq1.GetEnd(), r.GetEnd()) -+ -+ got := r.(*LabelRequest) -+ require.Equal(t, labelsReq1.GetName(), got.GetName()) -+ require.Equal(t, labelsReq1.GetValues(), got.GetValues()) -+ require.Equal(t, labelsReq1.GetQuery(), got.GetQuery()) -+ -+ return labelsResp1, nil -+ })) -+ -+ ctx := user.InjectOrgID(context.Background(), ""fake"") -+ got, err := handler.Do(ctx, &labelsReq1) -+ require.NoError(t, err) -+ require.Equal(t, 1, called) -+ require.Equal(t, labelsResp1, got) -+ -+ labelsReq2 := labelsReq1.WithStartEnd(labelsReq1.GetStart().Add(15*time.Minute), labelsReq1.GetEnd().Add(15*time.Minute)) -+ -+ called = 0 -+ handler = cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { -+ called++ -+ -+ // make downstream request only for the non-overlapping portion of the query. 
-+ require.Equal(t, labelsReq1.GetEnd(), r.GetStart()) -+ require.Equal(t, labelsReq2.GetEnd(), r.GetEnd()) -+ -+ got := r.(*LabelRequest) -+ require.Equal(t, labelsReq1.GetName(), got.GetName()) -+ require.Equal(t, labelsReq1.GetValues(), got.GetValues()) -+ require.Equal(t, labelsReq1.GetQuery(), got.GetQuery()) -+ -+ return &LokiLabelNamesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []string{""fizz""}, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 1, -+ }, -+ }, -+ }, nil -+ })) -+ -+ got, err = handler.Do(ctx, labelsReq2) -+ require.NoError(t, err) -+ require.Equal(t, 1, called) -+ // two splits as we merge the results from the extent and downstream request -+ labelsResp1.Statistics.Summary.Splits = 2 -+ require.Equal(t, &LokiLabelNamesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []string{""bar"", ""buzz"", ""fizz""}, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 2, -+ }, -+ }, -+ }, got) -+ }) -+ } -+} -diff --git a/pkg/querier/queryrange/limits/definitions.go b/pkg/querier/queryrange/limits/definitions.go -index bc8f7d0ec94bd..bd84e144fa47d 100644 ---- a/pkg/querier/queryrange/limits/definitions.go -+++ b/pkg/querier/queryrange/limits/definitions.go -@@ -14,6 +14,7 @@ type Limits interface { - queryrangebase.Limits - logql.Limits - QuerySplitDuration(string) time.Duration -+ MetadataQuerySplitDuration(string) time.Duration - MaxQuerySeries(context.Context, string) int - MaxEntriesLimitPerQuery(context.Context, string) int - MinShardingLookback(string) time.Duration -diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go -index efc9b030f7f84..3b82c1dc9eabb 100644 ---- a/pkg/querier/queryrange/limits_test.go -+++ b/pkg/querier/queryrange/limits_test.go -@@ -29,7 +29,7 @@ import ( - - func TestLimits(t *testing.T) { - l := fakeLimits{ -- splits: map[string]time.Duration{""a"": time.Minute}, -+ splitDuration: map[string]time.Duration{""a"": time.Minute}, - } - - wrapped := WithSplitByLimits(l, time.Hour) -diff --git a/pkg/querier/queryrange/log_result_cache_test.go b/pkg/querier/queryrange/log_result_cache_test.go -index 815e24b77ea9e..5d67be33b84fd 100644 ---- a/pkg/querier/queryrange/log_result_cache_test.go -+++ b/pkg/querier/queryrange/log_result_cache_test.go -@@ -32,7 +32,7 @@ func Test_LogResultCacheSameRange(t *testing.T) { - lrc = NewLogResultCache( - log.NewNopLogger(), - fakeLimits{ -- splits: map[string]time.Duration{""foo"": time.Minute}, -+ splitDuration: map[string]time.Duration{""foo"": time.Minute}, - }, - cache.NewMockCache(), - nil, -@@ -74,7 +74,7 @@ func Test_LogResultCacheSameRangeNonEmpty(t *testing.T) { - lrc = NewLogResultCache( - log.NewNopLogger(), - fakeLimits{ -- splits: map[string]time.Duration{""foo"": time.Minute}, -+ splitDuration: map[string]time.Duration{""foo"": time.Minute}, - }, - cache.NewMockCache(), - nil, -@@ -122,7 +122,7 @@ func Test_LogResultCacheSmallerRange(t *testing.T) { - lrc = NewLogResultCache( - log.NewNopLogger(), - fakeLimits{ -- splits: map[string]time.Duration{""foo"": time.Minute}, -+ splitDuration: map[string]time.Duration{""foo"": time.Minute}, - }, - cache.NewMockCache(), - nil, -@@ -172,7 +172,7 @@ func Test_LogResultCacheDifferentRange(t *testing.T) { - lrc = NewLogResultCache( - log.NewNopLogger(), - fakeLimits{ -- splits: map[string]time.Duration{""foo"": time.Minute}, -+ splitDuration: map[string]time.Duration{""foo"": time.Minute}, - }, - cache.NewMockCache(), - nil, -@@ 
-248,7 +248,7 @@ func Test_LogResultCacheDifferentRangeNonEmpty(t *testing.T) { - lrc = NewLogResultCache( - log.NewNopLogger(), - fakeLimits{ -- splits: map[string]time.Duration{""foo"": time.Minute}, -+ splitDuration: map[string]time.Duration{""foo"": time.Minute}, - }, - cache.NewMockCache(), - nil, -@@ -335,7 +335,7 @@ func Test_LogResultCacheDifferentRangeNonEmptyAndEmpty(t *testing.T) { - lrc = NewLogResultCache( - log.NewNopLogger(), - fakeLimits{ -- splits: map[string]time.Duration{""foo"": time.Minute}, -+ splitDuration: map[string]time.Duration{""foo"": time.Minute}, - }, - cache.NewMockCache(), - nil, -@@ -445,7 +445,7 @@ func Test_LogResultNonOverlappingCache(t *testing.T) { - lrc = NewLogResultCache( - log.NewNopLogger(), - fakeLimits{ -- splits: map[string]time.Duration{""foo"": time.Minute}, -+ splitDuration: map[string]time.Duration{""foo"": time.Minute}, - }, - mockCache, - nil, -diff --git a/pkg/querier/queryrange/prometheus_test.go b/pkg/querier/queryrange/prometheus_test.go -index c0529906241a5..98eb563ca7bd2 100644 ---- a/pkg/querier/queryrange/prometheus_test.go -+++ b/pkg/querier/queryrange/prometheus_test.go -@@ -85,6 +85,24 @@ var emptyStats = `""stats"": { - ""requests"": 0, - ""downloadTime"": 0 - }, -+ ""seriesResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, -+ ""labelResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, - ""volumeResult"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go -index c03d459ba9b23..5f0aef4a1ab49 100644 ---- a/pkg/querier/queryrange/roundtrip.go -+++ b/pkg/querier/queryrange/roundtrip.go -@@ -3,6 +3,7 @@ package queryrange - import ( - ""context"" - ""flag"" -+ ""fmt"" - ""net/http"" - ""strings"" - ""time"" -@@ -28,6 +29,19 @@ import ( - logutil ""github.com/grafana/loki/pkg/util/log"" - ) - -+const ( -+ // Parallelize the index stats requests, so it doesn't send a huge request to a single index-gw (i.e. {app=~"".+""} for 30d). -+ // Indices are sharded by 24 hours, so we split the stats request in 24h intervals. -+ indexStatsQuerySplitInterval = 24 * time.Hour -+ -+ // Limited queries only need to fetch up to the requested line limit worth of logs, -+ // Our defaults for splitting and parallelism are much too aggressive for large customers and result in -+ // potentially GB of logs being returned by all the shards and splits which will overwhelm the frontend -+ // Therefore we force max parallelism to `1` so that these queries are executed sequentially. -+ // Below we also fix the number of shards to a static number. 
-+ limitedQuerySplits = 1 -+) -+ - // Config is the configuration for the queryrange tripperware - type Config struct { - base.Config `yaml:"",inline""` -@@ -36,6 +50,10 @@ type Config struct { - StatsCacheConfig IndexStatsCacheConfig `yaml:""index_stats_results_cache"" doc:""description=If a cache config is not specified and cache_index_stats_results is true, the config for the results cache is used.""` - CacheVolumeResults bool `yaml:""cache_volume_results""` - VolumeCacheConfig VolumeCacheConfig `yaml:""volume_results_cache"" doc:""description=If a cache config is not specified and cache_volume_results is true, the config for the results cache is used.""` -+ CacheSeriesResults bool `yaml:""cache_series_results""` -+ SeriesCacheConfig SeriesCacheConfig `yaml:""series_results_cache"" doc:""description=If series_results_cache is not configured and cache_series_results is true, the config for the results cache is used.""` -+ CacheLabelResults bool `yaml:""cache_label_results""` -+ LabelsCacheConfig LabelsCacheConfig `yaml:""label_results_cache"" doc:""description=If label_results_cache is not configured and cache_label_results is true, the config for the results cache is used.""` - } - - // RegisterFlags adds the flags required to configure this flag set. -@@ -45,6 +63,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.StatsCacheConfig.RegisterFlags(f) - f.BoolVar(&cfg.CacheVolumeResults, ""querier.cache-volume-results"", false, ""Cache volume query results."") - cfg.VolumeCacheConfig.RegisterFlags(f) -+ f.BoolVar(&cfg.CacheSeriesResults, ""querier.cache-series-results"", false, ""Cache series query results."") -+ cfg.SeriesCacheConfig.RegisterFlags(f) -+ f.BoolVar(&cfg.CacheLabelResults, ""querier.cache-label-results"", false, ""Cache label query results."") -+ cfg.LabelsCacheConfig.RegisterFlags(f) - } - - // Validate validates the config. -@@ -112,6 +134,8 @@ func NewMiddleware( - resultsCache cache.Cache - statsCache cache.Cache - volumeCache cache.Cache -+ seriesCache cache.Cache -+ labelsCache cache.Cache - err error - ) - -@@ -123,28 +147,28 @@ func NewMiddleware( - } - - if cfg.CacheIndexStatsResults { -- // If the stats cache is not configured, use the results cache config. -- cacheCfg := cfg.StatsCacheConfig.ResultsCacheConfig -- if !cache.IsCacheConfigured(cacheCfg.CacheConfig) { -- level.Debug(log).Log(""msg"", ""using results cache config for stats cache"") -- cacheCfg = cfg.ResultsCacheConfig -+ statsCache, err = newResultsCacheFromConfig(cfg.StatsCacheConfig.ResultsCacheConfig, registerer, log, stats.StatsResultCache) -+ if err != nil { -+ return nil, nil, err - } -+ } - -- statsCache, err = newResultsCacheFromConfig(cacheCfg, registerer, log, stats.StatsResultCache) -+ if cfg.CacheVolumeResults { -+ volumeCache, err = newResultsCacheFromConfig(cfg.VolumeCacheConfig.ResultsCacheConfig, registerer, log, stats.VolumeResultCache) - if err != nil { - return nil, nil, err - } - } - -- if cfg.CacheVolumeResults { -- // If the volume cache is not configured, use the results cache config. 
-- cacheCfg := cfg.VolumeCacheConfig.ResultsCacheConfig -- if !cache.IsCacheConfigured(cacheCfg.CacheConfig) { -- level.Debug(log).Log(""msg"", ""using results cache config for volume cache"") -- cacheCfg = cfg.ResultsCacheConfig -+ if cfg.CacheSeriesResults { -+ seriesCache, err = newResultsCacheFromConfig(cfg.SeriesCacheConfig.ResultsCacheConfig, registerer, log, stats.SeriesResultCache) -+ if err != nil { -+ return nil, nil, err - } -+ } - -- volumeCache, err = newResultsCacheFromConfig(cacheCfg, registerer, log, stats.VolumeResultCache) -+ if cfg.CacheLabelResults { -+ labelsCache, err = newResultsCacheFromConfig(cfg.LabelsCacheConfig.ResultsCacheConfig, registerer, log, stats.LabelResultCache) - if err != nil { - return nil, nil, err - } -@@ -176,12 +200,12 @@ func NewMiddleware( - return nil, nil, err - } - -- seriesTripperware, err := NewSeriesTripperware(cfg, log, limits, metrics, schema, DefaultCodec, metricsNamespace) -+ seriesTripperware, err := NewSeriesTripperware(cfg, log, limits, metrics, schema, codec, seriesCache, cacheGenNumLoader, retentionEnabled, metricsNamespace) - if err != nil { - return nil, nil, err - } - -- labelsTripperware, err := NewLabelsTripperware(cfg, log, limits, codec, metrics, schema, metricsNamespace) -+ labelsTripperware, err := NewLabelsTripperware(cfg, log, limits, codec, labelsCache, cacheGenNumLoader, retentionEnabled, metrics, schema, metricsNamespace) - if err != nil { - return nil, nil, err - } -@@ -475,12 +499,7 @@ func NewLimitedTripperware( - NewLimitsMiddleware(limits), - NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), - base.InstrumentMiddleware(""split_by_interval"", metrics.InstrumentMiddlewareMetrics), -- // Limited queries only need to fetch up to the requested line limit worth of logs, -- // Our defaults for splitting and parallelism are much too aggressive for large customers and result in -- // potentially GB of logs being returned by all the shards and splits which will overwhelm the frontend -- // Therefore we force max parallelism to one so that these queries are executed sequentially. -- // Below we also fix the number of shards to a static number. 
-- SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, 1), merger, splitByTime, metrics.SplitByMetrics), -+ SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, limitedQuerySplits), merger, splitByTime, metrics.SplitByMetrics), - NewQuerierSizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), - } - -@@ -499,16 +518,56 @@ func NewSeriesTripperware( - metrics *Metrics, - schema config.SchemaConfig, - merger base.Merger, -+ c cache.Cache, -+ cacheGenNumLoader base.CacheGenNumberLoader, -+ retentionEnabled bool, - metricsNamespace string, - ) (base.Middleware, error) { -+ var cacheMiddleware base.Middleware -+ if cfg.CacheSeriesResults { -+ var err error -+ cacheMiddleware, err = NewSeriesCacheMiddleware( -+ log, -+ limits, -+ merger, -+ c, -+ cacheGenNumLoader, -+ func(_ context.Context, r base.Request) bool { -+ return !r.GetCachingOptions().Disabled -+ }, -+ func(ctx context.Context, tenantIDs []string, r base.Request) int { -+ return MinWeightedParallelism( -+ ctx, -+ tenantIDs, -+ schema.Configs, -+ limits, -+ model.Time(r.GetStart().UnixMilli()), -+ model.Time(r.GetEnd().UnixMilli()), -+ ) -+ }, -+ retentionEnabled, -+ cfg.Transformer, -+ metrics.ResultsCacheMetrics, -+ ) -+ if err != nil { -+ return nil, fmt.Errorf(""failed to create series cache middleware: %w"", err) -+ } -+ } -+ - queryRangeMiddleware := []base.Middleware{ - StatsCollectorMiddleware(), - NewLimitsMiddleware(limits), - base.InstrumentMiddleware(""split_by_interval"", metrics.InstrumentMiddlewareMetrics), -- // The Series API needs to pull one chunk per series to extract the label set, which is much cheaper than iterating through all matching chunks. -- // Force a 24 hours split by for series API, this will be more efficient with our static daily bucket storage. -- // This would avoid queriers downloading chunks for same series over and over again for serving smaller queries. 
-- SplitByIntervalMiddleware(schema.Configs, WithSplitByLimits(limits, 24*time.Hour), merger, splitByTime, metrics.SplitByMetrics), -+ SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), -+ } -+ -+ if cfg.CacheSeriesResults { -+ queryRangeMiddleware = append( -+ queryRangeMiddleware, -+ base.InstrumentMiddleware(""series_results_cache"", metrics.InstrumentMiddlewareMetrics), -+ cacheMiddleware, -+ ) -+ - } - - if cfg.MaxRetries > 0 { -@@ -542,17 +601,58 @@ func NewLabelsTripperware( - log log.Logger, - limits Limits, - merger base.Merger, -+ c cache.Cache, -+ cacheGenNumLoader base.CacheGenNumberLoader, -+ retentionEnabled bool, - metrics *Metrics, - schema config.SchemaConfig, - metricsNamespace string, - ) (base.Middleware, error) { -+ var cacheMiddleware base.Middleware -+ if cfg.CacheLabelResults { -+ var err error -+ cacheMiddleware, err = NewLabelsCacheMiddleware( -+ log, -+ limits, -+ merger, -+ c, -+ cacheGenNumLoader, -+ func(_ context.Context, r base.Request) bool { -+ return !r.GetCachingOptions().Disabled -+ }, -+ func(ctx context.Context, tenantIDs []string, r base.Request) int { -+ return MinWeightedParallelism( -+ ctx, -+ tenantIDs, -+ schema.Configs, -+ limits, -+ model.Time(r.GetStart().UnixMilli()), -+ model.Time(r.GetEnd().UnixMilli()), -+ ) -+ }, -+ retentionEnabled, -+ cfg.Transformer, -+ metrics.ResultsCacheMetrics, -+ ) -+ if err != nil { -+ return nil, fmt.Errorf(""failed to create labels cache middleware: %w"", err) -+ } -+ } -+ - queryRangeMiddleware := []base.Middleware{ - StatsCollectorMiddleware(), - NewLimitsMiddleware(limits), - base.InstrumentMiddleware(""split_by_interval"", metrics.InstrumentMiddlewareMetrics), -- // Force a 24 hours split by for labels API, this will be more efficient with our static daily bucket storage. -- // This is because the labels API is an index-only operation. -- SplitByIntervalMiddleware(schema.Configs, WithSplitByLimits(limits, 24*time.Hour), merger, splitByTime, metrics.SplitByMetrics), -+ SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), -+ } -+ -+ if cfg.CacheLabelResults { -+ queryRangeMiddleware = append( -+ queryRangeMiddleware, -+ base.InstrumentMiddleware(""label_results_cache"", metrics.InstrumentMiddlewareMetrics), -+ cacheMiddleware, -+ ) -+ - } - - if cfg.MaxRetries > 0 { -@@ -874,9 +974,7 @@ func NewIndexStatsTripperware( - metrics *Metrics, - metricsNamespace string, - ) (base.Middleware, error) { -- // Parallelize the index stats requests, so it doesn't send a huge request to a single index-gw (i.e. {app=~"".+""} for 30d). -- // Indices are sharded by 24 hours, so we split the stats request in 24h intervals. 
-- limits = WithSplitByLimits(limits, 24*time.Hour) -+ limits = WithSplitByLimits(limits, indexStatsQuerySplitInterval) - - var cacheMiddleware base.Middleware - if cfg.CacheIndexStatsResults { -diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go -index c0b05103ded36..883f9b14226bc 100644 ---- a/pkg/querier/queryrange/roundtrip_test.go -+++ b/pkg/querier/queryrange/roundtrip_test.go -@@ -396,7 +396,14 @@ func TestInstantQueryTripperware(t *testing.T) { - } - - func TestSeriesTripperware(t *testing.T) { -- tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) -+ l := fakeLimits{ -+ maxQueryLength: 48 * time.Hour, -+ maxQueryParallelism: 1, -+ metadataSplitDuration: map[string]time.Duration{ -+ ""1"": 24 * time.Hour, -+ }, -+ } -+ tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) - if stopper != nil { - defer stopper.Stop() - } -@@ -427,7 +434,14 @@ func TestSeriesTripperware(t *testing.T) { - } - - func TestLabelsTripperware(t *testing.T) { -- tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) -+ l := fakeLimits{ -+ maxQueryLength: 48 * time.Hour, -+ maxQueryParallelism: 1, -+ metadataSplitDuration: map[string]time.Duration{ -+ ""1"": 24 * time.Hour, -+ }, -+ } -+ tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) - if stopper != nil { - defer stopper.Stop() - } -@@ -679,7 +693,7 @@ func TestNewTripperware_Caches(t *testing.T) { - err: """", - }, - { -- name: ""results cache enabled, stats cache disabled"", -+ name: ""results cache enabled"", - config: Config{ - Config: base.Config{ - CacheResults: true, -@@ -694,34 +708,32 @@ func TestNewTripperware_Caches(t *testing.T) { - }, - }, - }, -- CacheIndexStatsResults: false, - }, - numCaches: 1, - err: """", - }, - { -- name: ""results cache enabled, stats cache enabled"", -+ name: ""stats cache enabled"", - config: Config{ -- Config: base.Config{ -- CacheResults: true, -+ CacheIndexStatsResults: true, -+ StatsCacheConfig: IndexStatsCacheConfig{ - ResultsCacheConfig: base.ResultsCacheConfig{ - Config: resultscache.Config{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ -- MaxSizeMB: 1, - Enabled: true, -+ MaxSizeMB: 1000, - }, - }, - }, - }, - }, -- CacheIndexStatsResults: true, - }, -- numCaches: 2, -+ numCaches: 1, - err: """", - }, - { -- name: ""results cache enabled, stats cache enabled but different"", -+ name: ""results cache enabled, stats cache enabled"", - config: Config{ - Config: base.Config{ - CacheResults: true, -@@ -763,11 +775,8 @@ func TestNewTripperware_Caches(t *testing.T) { - err: fmt.Sprintf(""%s cache is not configured"", stats.ResultCache), - }, - { -- name: ""results cache disabled, stats cache enabled (no config provided)"", -+ name: ""stats cache enabled (no config provided)"", - config: Config{ -- Config: base.Config{ -- CacheResults: false, -- }, - CacheIndexStatsResults: true, - }, - numCaches: 0, -@@ -1234,7 +1243,8 @@ type fakeLimits struct { - maxQueryLookback time.Duration - 
maxEntriesLimitPerQuery int - maxSeries int -- splits map[string]time.Duration -+ splitDuration map[string]time.Duration -+ metadataSplitDuration map[string]time.Duration - minShardingLookback time.Duration - queryTimeout time.Duration - requiredLabels []string -@@ -1246,10 +1256,17 @@ type fakeLimits struct { - } - - func (f fakeLimits) QuerySplitDuration(key string) time.Duration { -- if f.splits == nil { -+ if f.splitDuration == nil { -+ return 0 -+ } -+ return f.splitDuration[key] -+} -+ -+func (f fakeLimits) MetadataQuerySplitDuration(key string) time.Duration { -+ if f.metadataSplitDuration == nil { - return 0 - } -- return f.splits[key] -+ return f.metadataSplitDuration[key] - } - - func (f fakeLimits) MaxQueryLength(context.Context, string) time.Duration { -diff --git a/pkg/querier/queryrange/series_cache.go b/pkg/querier/queryrange/series_cache.go -new file mode 100644 -index 0000000000000..9ad67f70acf55 ---- /dev/null -+++ b/pkg/querier/queryrange/series_cache.go -@@ -0,0 +1,100 @@ -+package queryrange -+ -+import ( -+ ""context"" -+ ""flag"" -+ ""fmt"" -+ ""sort"" -+ strings ""strings"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ -+ ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"" -+ ""github.com/grafana/loki/pkg/storage/chunk/cache"" -+ ""github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"" -+) -+ -+type cacheKeySeries struct { -+ Limits -+ transformer UserIDTransformer -+} -+ -+// GenerateCacheKey generates a cache key based on the userID, matchers, split duration and the interval of the request. -+func (i cacheKeySeries) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string { -+ sr := r.(*LokiSeriesRequest) -+ split := i.MetadataQuerySplitDuration(userID) -+ -+ var currentInterval int64 -+ if denominator := int64(split / time.Millisecond); denominator > 0 { -+ currentInterval = sr.GetStart().UnixMilli() / denominator -+ } -+ -+ if i.transformer != nil { -+ userID = i.transformer(ctx, userID) -+ } -+ -+ matchers := sr.GetMatch() -+ sort.Strings(matchers) -+ matcherStr := strings.Join(matchers, "","") -+ -+ return fmt.Sprintf(""series:%s:%s:%d:%d"", userID, matcherStr, currentInterval, split) -+} -+ -+type seriesExtractor struct{} -+ -+// Extract extracts the series response for the specific time range. -+// It is a no-op since it is not possible to partition the series data by time range as it is just a list of kv pairs. -+func (p seriesExtractor) Extract(_, _ int64, res resultscache.Response, _, _ int64) resultscache.Response { -+ return res -+} -+ -+func (p seriesExtractor) ResponseWithoutHeaders(resp queryrangebase.Response) queryrangebase.Response { -+ seriesRes := resp.(*LokiSeriesResponse) -+ return &LokiSeriesResponse{ -+ Data: seriesRes.Data, -+ Status: seriesRes.Status, -+ Version: seriesRes.Version, -+ Statistics: seriesRes.Statistics, -+ } -+} -+ -+type SeriesCacheConfig struct { -+ queryrangebase.ResultsCacheConfig `yaml:"",inline""` -+} -+ -+// RegisterFlags registers flags. 
-+func (cfg *SeriesCacheConfig) RegisterFlags(f *flag.FlagSet) { -+ cfg.RegisterFlagsWithPrefix(f, ""frontend.series-results-cache."") -+} -+ -+func (cfg *SeriesCacheConfig) Validate() error { -+ return cfg.ResultsCacheConfig.Validate() -+} -+ -+func NewSeriesCacheMiddleware( -+ logger log.Logger, -+ limits Limits, -+ merger queryrangebase.Merger, -+ c cache.Cache, -+ cacheGenNumberLoader queryrangebase.CacheGenNumberLoader, -+ shouldCache queryrangebase.ShouldCacheFn, -+ parallelismForReq queryrangebase.ParallelismForReqFn, -+ retentionEnabled bool, -+ transformer UserIDTransformer, -+ metrics *queryrangebase.ResultsCacheMetrics, -+) (queryrangebase.Middleware, error) { -+ return queryrangebase.NewResultsCacheMiddleware( -+ logger, -+ c, -+ cacheKeySeries{limits, transformer}, -+ limits, -+ merger, -+ seriesExtractor{}, -+ cacheGenNumberLoader, -+ shouldCache, -+ parallelismForReq, -+ retentionEnabled, -+ metrics, -+ ) -+} -diff --git a/pkg/querier/queryrange/series_cache_test.go b/pkg/querier/queryrange/series_cache_test.go -new file mode 100644 -index 0000000000000..abe9920012172 ---- /dev/null -+++ b/pkg/querier/queryrange/series_cache_test.go -@@ -0,0 +1,314 @@ -+package queryrange -+ -+import ( -+ ""context"" -+ ""fmt"" -+ ""testing"" -+ ""time"" -+ -+ ""github.com/go-kit/log"" -+ ""github.com/grafana/dskit/user"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/loghttp"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logqlmodel/stats"" -+ ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"" -+ ""github.com/grafana/loki/pkg/storage/chunk/cache"" -+ ""github.com/grafana/loki/pkg/util"" -+) -+ -+var ( -+ seriesAPIPath = ""/loki/api/v1/series"" -+) -+ -+func TestCacheKeySeries_GenerateCacheKey(t *testing.T) { -+ k := cacheKeySeries{ -+ transformer: nil, -+ Limits: fakeLimits{ -+ metadataSplitDuration: map[string]time.Duration{ -+ ""fake"": time.Hour, -+ }, -+ }, -+ } -+ -+ from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) -+ req := &LokiSeriesRequest{ -+ StartTs: from.Time(), -+ EndTs: through.Time(), -+ Match: []string{`{namespace=""prod""}`, `{service=""foo""}`}, -+ Path: seriesAPIPath, -+ } -+ -+ expectedInterval := testTime.UnixMilli() / time.Hour.Milliseconds() -+ require.Equal(t, fmt.Sprintf(`series:fake:{namespace=""prod""},{service=""foo""}:%d:%d`, expectedInterval, time.Hour.Nanoseconds()), k.GenerateCacheKey(context.Background(), ""fake"", req)) -+ -+ t.Run(""same set of matchers in any order should result in the same cache key"", func(t *testing.T) { -+ from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) -+ -+ for _, matchers := range [][]string{ -+ {`{cluster=""us-central""}`, `{namespace=""prod""}`, `{service=~""foo.*""}`}, -+ {`{namespace=""prod""}`, `{service=~""foo.*""}`, `{cluster=""us-central""}`}, -+ } { -+ req := &LokiSeriesRequest{ -+ StartTs: from.Time(), -+ EndTs: through.Time(), -+ Match: matchers, -+ Path: seriesAPIPath, -+ } -+ expectedInterval := testTime.UnixMilli() / time.Hour.Milliseconds() -+ require.Equal(t, fmt.Sprintf(`series:fake:{cluster=""us-central""},{namespace=""prod""},{service=~""foo.*""}:%d:%d`, expectedInterval, time.Hour.Nanoseconds()), k.GenerateCacheKey(context.Background(), ""fake"", req)) -+ } -+ -+ }) -+} -+ -+func TestSeriesCache(t *testing.T) { -+ setupCacheMW := func() queryrangebase.Middleware { -+ cacheMiddleware, err := NewSeriesCacheMiddleware( -+ log.NewNopLogger(), -+ fakeLimits{ -+ 
metadataSplitDuration: map[string]time.Duration{ -+ ""fake"": 24 * time.Hour, -+ }, -+ }, -+ DefaultCodec, -+ cache.NewMockCache(), -+ nil, -+ nil, -+ func(_ context.Context, _ []string, _ queryrangebase.Request) int { -+ return 1 -+ }, -+ false, -+ nil, -+ nil, -+ ) -+ require.NoError(t, err) -+ -+ return cacheMiddleware -+ } -+ -+ t.Run(""caches the response for the same request"", func(t *testing.T) { -+ cacheMiddleware := setupCacheMW() -+ from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) -+ -+ seriesReq := &LokiSeriesRequest{ -+ StartTs: from.Time(), -+ EndTs: through.Time(), -+ Match: []string{`{namespace=~"".*""}`}, -+ Path: seriesAPIPath, -+ } -+ -+ seriesResp := &LokiSeriesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []logproto.SeriesIdentifier{ -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""eu-west""}, {Key: ""namespace"", Value: ""prod""}}, -+ }, -+ }, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 1, -+ }, -+ }, -+ } -+ -+ called := 0 -+ handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { -+ called++ -+ -+ // should request the entire length with no partitioning as nothing is cached yet. -+ require.Equal(t, seriesReq.GetStart(), r.GetStart()) -+ require.Equal(t, seriesReq.GetEnd(), r.GetEnd()) -+ -+ return seriesResp, nil -+ })) -+ -+ ctx := user.InjectOrgID(context.Background(), ""fake"") -+ got, err := handler.Do(ctx, seriesReq) -+ require.NoError(t, err) -+ require.Equal(t, 1, called) // called actual handler, as not cached. -+ require.Equal(t, seriesResp, got) -+ -+ // Doing same request again shouldn't change anything. -+ called = 0 -+ got, err = handler.Do(ctx, seriesReq) -+ require.NoError(t, err) -+ require.Equal(t, 0, called) -+ require.Equal(t, seriesResp, got) -+ }) -+ -+ t.Run(""a new request with overlapping time range should reuse part of the previous request for the overlap"", func(t *testing.T) { -+ cacheMiddleware := setupCacheMW() -+ -+ from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) -+ req1 := &LokiSeriesRequest{ -+ StartTs: from.Time(), -+ EndTs: through.Time(), -+ Match: []string{`{namespace=~"".*""}`}, -+ Path: seriesAPIPath, -+ } -+ resp1 := &LokiSeriesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []logproto.SeriesIdentifier{ -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""us-central""}, {Key: ""namespace"", Value: ""dev""}}, -+ }, -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""eu-west""}, {Key: ""namespace"", Value: ""prod""}}, -+ }, -+ }, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 1, -+ }, -+ }, -+ } -+ -+ called := 0 -+ handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { -+ called++ -+ -+ // should request the entire length with no partitioning as nothing is cached yet. 
-+ require.Equal(t, req1.GetStart(), r.GetStart()) -+ require.Equal(t, req1.GetEnd(), r.GetEnd()) -+ -+ return resp1, nil -+ })) -+ -+ ctx := user.InjectOrgID(context.Background(), ""fake"") -+ got, err := handler.Do(ctx, req1) -+ require.NoError(t, err) -+ require.Equal(t, 1, called) -+ require.Equal(t, resp1, got) -+ -+ req2 := req1.WithStartEnd(req1.GetStart().Add(15*time.Minute), req1.GetEnd().Add(15*time.Minute)) -+ -+ called = 0 -+ handler = cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { -+ called++ -+ -+ // make downstream request only for the non-overlapping portion of the query. -+ require.Equal(t, req1.GetEnd(), r.GetStart()) -+ require.Equal(t, req1.GetEnd().Add(15*time.Minute), r.GetEnd()) -+ -+ return &LokiSeriesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []logproto.SeriesIdentifier{ -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""us-central""}, {Key: ""namespace"", Value: ""prod""}}, -+ }, -+ }, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 1, -+ }, -+ }, -+ }, nil -+ })) -+ -+ got, err = handler.Do(ctx, req2) -+ require.NoError(t, err) -+ require.Equal(t, 1, called) -+ // two splits as we merge the results from the extent and downstream request -+ resp1.Statistics.Summary.Splits = 2 -+ require.Equal(t, &LokiSeriesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []logproto.SeriesIdentifier{ -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""us-central""}, {Key: ""namespace"", Value: ""dev""}}, -+ }, -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""eu-west""}, {Key: ""namespace"", Value: ""prod""}}, -+ }, -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""us-central""}, {Key: ""namespace"", Value: ""prod""}}, -+ }, -+ }, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 2, -+ }, -+ }, -+ }, got) -+ }) -+ -+ t.Run(""caches are only valid for the same request parameters"", func(t *testing.T) { -+ cacheMiddleware := setupCacheMW() -+ -+ from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour)) -+ seriesReq := &LokiSeriesRequest{ -+ StartTs: from.Time(), -+ EndTs: through.Time(), -+ Match: []string{`{namespace=~"".*""}`}, -+ Path: seriesAPIPath, -+ } -+ seriesResp := &LokiSeriesResponse{ -+ Status: ""success"", -+ Version: uint32(loghttp.VersionV1), -+ Data: []logproto.SeriesIdentifier{ -+ { -+ Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: ""cluster"", Value: ""eu-west""}, {Key: ""namespace"", Value: ""prod""}}, -+ }, -+ }, -+ Statistics: stats.Result{ -+ Summary: stats.Summary{ -+ Splits: 1, -+ }, -+ }, -+ } -+ -+ called := 0 -+ handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { -+ called++ -+ -+ // should request the entire length as none of the subsequent queries hit the cache. 
-+ require.Equal(t, seriesReq.GetStart(), r.GetStart()) -+ require.Equal(t, seriesReq.GetEnd(), r.GetEnd()) -+ return seriesResp, nil -+ })) -+ -+ // initial call to fill cache -+ ctx := user.InjectOrgID(context.Background(), ""fake"") -+ _, err := handler.Do(ctx, seriesReq) -+ require.NoError(t, err) -+ require.Equal(t, 1, called) -+ -+ type testCase struct { -+ fn func(*LokiSeriesRequest) -+ user string -+ } -+ testCases := map[string]testCase{ -+ ""different match"": { -+ fn: func(req *LokiSeriesRequest) { -+ req.Match = append(req.Match, `{foo=""bar""}`) -+ }, -+ }, -+ ""different user"": { -+ user: ""fake2s"", -+ }, -+ } -+ -+ for name, tc := range testCases { -+ called = 0 -+ seriesReq := seriesReq -+ -+ if tc.fn != nil { -+ tc.fn(seriesReq) -+ } -+ -+ if tc.user != """" { -+ ctx = user.InjectOrgID(context.Background(), tc.user) -+ } -+ -+ _, err = handler.Do(ctx, seriesReq) -+ require.NoError(t, err) -+ require.Equal(t, 1, called, name) -+ } -+ }) -+} -diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go -index d568fe65ddde8..9e2eda4b19423 100644 ---- a/pkg/querier/queryrange/split_by_interval.go -+++ b/pkg/querier/queryrange/split_by_interval.go -@@ -184,7 +184,14 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - -- interval := validation.MaxDurationOrZeroPerTenant(tenantIDs, h.limits.QuerySplitDuration) -+ var interval time.Duration -+ switch r.(type) { -+ case *LokiSeriesRequest, *LabelRequest: -+ interval = validation.MaxDurationOrZeroPerTenant(tenantIDs, h.limits.MetadataQuerySplitDuration) -+ default: -+ interval = validation.MaxDurationOrZeroPerTenant(tenantIDs, h.limits.QuerySplitDuration) -+ } -+ - // skip split by if unset - if interval == 0 { - return h.next.Do(ctx, r) -diff --git a/pkg/querier/queryrange/split_by_interval_test.go b/pkg/querier/queryrange/split_by_interval_test.go -index 58b78b820a51c..b236b88fb4d53 100644 ---- a/pkg/querier/queryrange/split_by_interval_test.go -+++ b/pkg/querier/queryrange/split_by_interval_test.go -@@ -828,7 +828,12 @@ func Test_series_splitByInterval_Do(t *testing.T) { - }, nil - }) - -- l := WithSplitByLimits(fakeLimits{maxQueryParallelism: 1}, time.Hour) -+ l := fakeLimits{ -+ maxQueryParallelism: 1, -+ metadataSplitDuration: map[string]time.Duration{ -+ ""1"": time.Hour, -+ }, -+ } - split := SplitByIntervalMiddleware( - testSchemas, - l, -diff --git a/pkg/querier/queryrange/split_by_range_test.go b/pkg/querier/queryrange/split_by_range_test.go -index ef25e3f910fb3..b1687611abc1d 100644 ---- a/pkg/querier/queryrange/split_by_range_test.go -+++ b/pkg/querier/queryrange/split_by_range_test.go -@@ -21,7 +21,7 @@ func Test_RangeVectorSplit(t *testing.T) { - srm := NewSplitByRangeMiddleware(log.NewNopLogger(), testEngineOpts, fakeLimits{ - maxSeries: 10000, - queryTimeout: time.Second, -- splits: map[string]time.Duration{ -+ splitDuration: map[string]time.Duration{ - ""tenant"": time.Minute, - }, - }, nilShardingMetrics) -diff --git a/pkg/util/marshal/legacy/marshal_test.go b/pkg/util/marshal/legacy/marshal_test.go -index 79f40c8990f4e..bc5ae29cd1085 100644 ---- a/pkg/util/marshal/legacy/marshal_test.go -+++ b/pkg/util/marshal/legacy/marshal_test.go -@@ -128,6 +128,24 @@ var queryTests = []struct { - ""requests"": 0, - ""downloadTime"": 0 - }, -+ ""seriesResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 
0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, -+ ""labelResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, - ""volumeResult"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go -index 6d816841daa16..87fe3fdca9329 100644 ---- a/pkg/util/marshal/marshal_test.go -+++ b/pkg/util/marshal/marshal_test.go -@@ -96,6 +96,24 @@ const emptyStats = `{ - ""requests"": 0, - ""downloadTime"": 0 - }, -+ ""seriesResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, -+ ""labelResult"": { -+ ""entriesFound"": 0, -+ ""entriesRequested"": 0, -+ ""entriesStored"": 0, -+ ""bytesReceived"": 0, -+ ""bytesSent"": 0, -+ ""requests"": 0, -+ ""downloadTime"": 0 -+ }, - ""volumeResult"": { - ""entriesFound"": 0, - ""entriesRequested"": 0, -diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go -index cc55662aa27ef..7a1cab3e1e0e8 100644 ---- a/pkg/validation/limits.go -+++ b/pkg/validation/limits.go -@@ -104,12 +104,13 @@ type Limits struct { - QueryTimeout model.Duration `yaml:""query_timeout"" json:""query_timeout""` - - // Query frontend enforced limits. The default is actually parameterized by the queryrange config. -- QuerySplitDuration model.Duration `yaml:""split_queries_by_interval"" json:""split_queries_by_interval""` -- MinShardingLookback model.Duration `yaml:""min_sharding_lookback"" json:""min_sharding_lookback""` -- MaxQueryBytesRead flagext.ByteSize `yaml:""max_query_bytes_read"" json:""max_query_bytes_read""` -- MaxQuerierBytesRead flagext.ByteSize `yaml:""max_querier_bytes_read"" json:""max_querier_bytes_read""` -- VolumeEnabled bool `yaml:""volume_enabled"" json:""volume_enabled"" doc:""description=Enable log-volume endpoints.""` -- VolumeMaxSeries int `yaml:""volume_max_series"" json:""volume_max_series"" doc:""description=The maximum number of aggregated series in a log-volume response""` -+ QuerySplitDuration model.Duration `yaml:""split_queries_by_interval"" json:""split_queries_by_interval""` -+ MetadataQuerySplitDuration model.Duration `yaml:""split_metadata_queries_by_interval"" json:""split_metadata_queries_by_interval""` -+ MinShardingLookback model.Duration `yaml:""min_sharding_lookback"" json:""min_sharding_lookback""` -+ MaxQueryBytesRead flagext.ByteSize `yaml:""max_query_bytes_read"" json:""max_query_bytes_read""` -+ MaxQuerierBytesRead flagext.ByteSize `yaml:""max_querier_bytes_read"" json:""max_querier_bytes_read""` -+ VolumeEnabled bool `yaml:""volume_enabled"" json:""volume_enabled"" doc:""description=Enable log-volume endpoints.""` -+ VolumeMaxSeries int `yaml:""volume_max_series"" json:""volume_max_series"" doc:""description=The maximum number of aggregated series in a log-volume response""` - - // Ruler defaults and limits. - RulerMaxRulesPerRuleGroup int `yaml:""ruler_max_rules_per_rule_group"" json:""ruler_max_rules_per_rule_group""` -@@ -296,6 +297,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { - _ = l.QuerySplitDuration.Set(""1h"") - f.Var(&l.QuerySplitDuration, ""querier.split-queries-by-interval"", ""Split queries by a time interval and execute in parallel. The value 0 disables splitting by time. 
This also determines how cache keys are chosen when result caching is enabled."") - -+ _ = l.MetadataQuerySplitDuration.Set(""24h"") -+ f.Var(&l.MetadataQuerySplitDuration, ""querier.split-metadata-queries-by-interval"", ""Split metadata queries by a time interval and execute in parallel. The value 0 disables splitting metadata queries by time. This also determines how cache keys are chosen when label/series result caching is enabled."") -+ - f.StringVar(&l.DeletionMode, ""compactor.deletion-mode"", ""filter-and-delete"", ""Deletion mode. Can be one of 'disabled', 'filter-only', or 'filter-and-delete'. When set to 'filter-only' or 'filter-and-delete', and if retention_enabled is true, then the log entry deletion API endpoints are available."") - - // Deprecated -@@ -567,6 +571,11 @@ func (o *Overrides) QuerySplitDuration(userID string) time.Duration { - return time.Duration(o.getOverridesForUser(userID).QuerySplitDuration) - } - -+// MetadataQuerySplitDuration returns the tenant specific metadata splitby interval applied in the query frontend. -+func (o *Overrides) MetadataQuerySplitDuration(userID string) time.Duration { -+ return time.Duration(o.getOverridesForUser(userID).MetadataQuerySplitDuration) -+} -+ - // MaxQueryBytesRead returns the maximum bytes a query can read. - func (o *Overrides) MaxQueryBytesRead(_ context.Context, userID string) int { - return o.getOverridesForUser(userID).MaxQueryBytesRead.Val()",feat,"Support caching `/series` and `/labels` query results (#11539) - -**What this PR does / why we need it**: -Add support for caching metadata queries (both series and labels). -caching happens after splitting similar to other types of queries. - -This pr adds the following configs to enable them. -``` -cache_series_results: true|false (default false) -cache_label_results: true|false (default false) -``` -And the cache backend for them can be configured using -`series_results_cache` and `label_results_cache` blocks under the -`query_range` section. - -Currently the split interval for metadata queries is fixed and defaults -to 24h, this pr makes it configurable by introducing -`split_metadata_queries_by_interval` - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [x] Documentation added -- [x] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. 
-[Example -PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) - ---------- - -Signed-off-by: Kaviraj -Co-authored-by: Ashwanth Goli " -54320f27e3a046fa6e952c26174a37b9d31fd080,2023-10-26 22:01:40,steve-caron-grafana,"Update _index.md (#11052) - -Changed the title from ""Prometheus pipeline stages"" to ""Promtail -pipeline stages"" - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [ ] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. - ---------- - -Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/send-data/promtail/stages/_index.md b/docs/sources/send-data/promtail/stages/_index.md -index 32689e39d4d32..1530fedd4ad1b 100644 ---- a/docs/sources/send-data/promtail/stages/_index.md -+++ b/docs/sources/send-data/promtail/stages/_index.md -@@ -1,5 +1,5 @@ - --- --title: Prometheus pipeline stages -+title: Promtail pipeline stages - menuTitle: Pipeline stages - description: Overview of the Promtail pipeline stages. - aliases: -@@ -7,7 +7,7 @@ aliases: - weight: 700 - --- - --# Prometheus pipeline stages -+# Promtail pipeline stages - - This section is a collection of all stages Promtail supports in a - [Pipeline]({{< relref ""../pipelines"" >}}).",unknown,"Update _index.md (#11052) - -Changed the title from ""Prometheus pipeline stages"" to ""Promtail -pipeline stages"" - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [ ] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) -- [ ] If the change is deprecating or removing a configuration option, -update the `deprecated-config.yaml` and `deleted-config.yaml` files -respectively in the `tools/deprecated-config-checker` directory. 
- ---------- - -Co-authored-by: J Stickler " -7264e0ef530ed56fc8dfd0926c95100e135b3228,2023-03-31 21:53:44,Ed Welch,"Loki: Remove global singleton of the tsdb.Store and keep it scoped to the storage.Store (#8928) - -**What this PR does / why we need it**: - -I made this change to fix an issue with the cmd/migrate tool which I'm -also sneaking a few fixes on in this PR too. - -We had a global singleton of the tsdb.Store which was leading to the -migrate tool reading from and writing to the `source` storage.Store -because it created the singleton with an object store client only -pointing to the source store. - -This PR changes the scoping of that singleton to not be global but -instead local to each instance of storage.Store - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [ ] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md` - -Signed-off-by: Edward Welch ",False,"diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go -index 0660aee2ebe12..281ba05eaabcb 100644 ---- a/cmd/migrate/main.go -+++ b/cmd/migrate/main.go -@@ -23,6 +23,7 @@ import ( - ""github.com/grafana/loki/pkg/storage"" - ""github.com/grafana/loki/pkg/storage/chunk"" - ""github.com/grafana/loki/pkg/storage/config"" -+ ""github.com/grafana/loki/pkg/storage/stores/indexshipper"" - ""github.com/grafana/loki/pkg/util/cfg"" - util_log ""github.com/grafana/loki/pkg/util/log"" - ""github.com/grafana/loki/pkg/validation"" -@@ -93,9 +94,17 @@ func main() { - // Don't keep fetched index files for very long - sourceConfig.StorageConfig.BoltDBShipperConfig.CacheTTL = 30 * time.Minute - -+ sourceConfig.StorageConfig.BoltDBShipperConfig.Mode = indexshipper.ModeReadOnly -+ sourceConfig.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeReadOnly -+ - // Shorten these timers up so we resync a little faster and clear index files a little quicker - destConfig.StorageConfig.IndexCacheValidity = 1 * time.Minute - destConfig.StorageConfig.BoltDBShipperConfig.ResyncInterval = 1 * time.Minute -+ destConfig.StorageConfig.TSDBShipperConfig.ResyncInterval = 1 * time.Minute -+ -+ // Don't want to use the index gateway for this, this makes sure the index files are properly uploaded when the store is stopped. -+ sourceConfig.StorageConfig.TSDBShipperConfig.IndexGatewayClientConfig.Disabled = true -+ destConfig.StorageConfig.TSDBShipperConfig.IndexGatewayClientConfig.Disabled = true - - // The long nature of queries requires stretching out the cardinality limit some and removing the query length limit - sourceConfig.LimitsConfig.CardinalityLimit = 1e9 -diff --git a/pkg/storage/store.go b/pkg/storage/store.go -index 0df7c89fc039b..41d9d03c8eab5 100644 ---- a/pkg/storage/store.go -+++ b/pkg/storage/store.go -@@ -76,6 +76,10 @@ type store struct { - logger log.Logger - - chunkFilterer chunk.RequestChunkFilterer -+ -+ // Keep a reference to the tsdb index store as we use one store for multiple schema period configs. -+ tsdbStore index.ReaderWriter -+ tsdbStoreStopFunc func() - } - - // NewStore creates a new Loki Store using configuration supplied. 
-@@ -246,20 +250,25 @@ func (s *store) storeForPeriod(p config.PeriodConfig, chunkClient client.Client, - } - } - -- indexReaderWriter, stopTSDBStoreFunc, err := tsdb.NewStore(s.cfg.TSDBShipperConfig, p, f, objectClient, s.limits, -- getIndexStoreTableRanges(config.TSDBType, s.schemaCfg.Configs), backupIndexWriter, indexClientReg) -- if err != nil { -- return nil, nil, nil, err -+ // We should only create one tsdb.Store per storage.Store and reuse it over all TSDB schema periods. -+ if s.tsdbStore == nil { -+ indexReaderWriter, stopTSDBStoreFunc, err := tsdb.NewStore(s.cfg.TSDBShipperConfig, p, f, objectClient, s.limits, -+ getIndexStoreTableRanges(config.TSDBType, s.schemaCfg.Configs), backupIndexWriter, indexClientReg) -+ if err != nil { -+ return nil, nil, nil, err -+ } -+ s.tsdbStore = indexReaderWriter -+ s.tsdbStoreStopFunc = stopTSDBStoreFunc - } - -- indexReaderWriter = index.NewMonitoredReaderWriter(indexReaderWriter, indexClientReg) -- chunkWriter := stores.NewChunkWriter(f, s.schemaCfg, indexReaderWriter, s.storeCfg.DisableIndexDeduplication) -+ indexReaderWriter := index.NewMonitoredReaderWriter(s.tsdbStore, indexClientReg) -+ chunkWriter := stores.NewChunkWriter(f, s.schemaCfg, s.tsdbStore, s.storeCfg.DisableIndexDeduplication) - - return chunkWriter, indexReaderWriter, - func() { - f.Stop() - chunkClient.Stop() -- stopTSDBStoreFunc() -+ s.tsdbStoreStopFunc() - objectClient.Stop() - backupStoreStop() - }, nil -diff --git a/pkg/storage/stores/tsdb/store.go b/pkg/storage/stores/tsdb/store.go -index 67ca42f3b34c4..6f0b6fc99ebb5 100644 ---- a/pkg/storage/stores/tsdb/store.go -+++ b/pkg/storage/stores/tsdb/store.go -@@ -35,72 +35,33 @@ type store struct { - stopOnce sync.Once - } - --var storeInstance *store -- --// This must only be called in test cases where a new store instances --// cannot be explicitly created. --func ResetStoreInstance() { -- if storeInstance == nil { -- return -- } -- storeInstance.Stop() -- storeInstance = nil --} -- --type newStoreFactoryFunc func( -- indexShipperCfg indexshipper.Config, -+// NewStore creates a new TSDB store. -+// This is meant to be a singleton and should be instantiated only once per storage.Store and reused for all schema configs. -+// We do not need to build store for each schema config since we do not do any schema specific handling yet. -+// If we do need to do schema specific handling, it would be a good idea to abstract away the handling since -+// running multiple head managers would be complicated and wasteful. -+// Note: The cmd/migrate tool needs this not to be a true global singleton -+// as it will create multiple storage.Store instances in the same process. -+func NewStore(indexShipperCfg indexshipper.Config, - p config.PeriodConfig, - f *fetcher.Fetcher, - objectClient client.ObjectClient, - limits downloads.Limits, - tableRanges config.TableRanges, - backupIndexWriter index.Writer, -- reg prometheus.Registerer, --) ( -- indexReaderWriter index.ReaderWriter, -- stopFunc func(), -- err error, --) -- --// NewStore creates a new store if not initialized already. --// Each call to NewStore will always build a new stores.ChunkWriter even if the store was already initialized since --// fetcher.Fetcher instances could be different due to periodic configs having different types of object storage configured --// for storing chunks. --// It also helps us make tsdb store a singleton because --// we do not need to build store for each schema config since we do not do any schema specific handling yet. 
--// If we do need to do schema specific handling, it would be a good idea to abstract away the handling since --// running multiple head managers would be complicated and wasteful. --var NewStore = func() newStoreFactoryFunc { -- return func( -- indexShipperCfg indexshipper.Config, -- p config.PeriodConfig, -- f *fetcher.Fetcher, -- objectClient client.ObjectClient, -- limits downloads.Limits, -- tableRanges config.TableRanges, -- backupIndexWriter index.Writer, -- reg prometheus.Registerer, -- ) ( -- index.ReaderWriter, -- func(), -- error, -- ) { -- if storeInstance == nil { -- if backupIndexWriter == nil { -- backupIndexWriter = noopBackupIndexWriter{} -- } -- storeInstance = &store{ -- backupIndexWriter: backupIndexWriter, -- } -- err := storeInstance.init(indexShipperCfg, objectClient, limits, tableRanges, reg) -- if err != nil { -- return nil, nil, err -- } -- } -- -- return storeInstance, storeInstance.Stop, nil -+ reg prometheus.Registerer) (index.ReaderWriter, func(), error) { -+ if backupIndexWriter == nil { -+ backupIndexWriter = noopBackupIndexWriter{} - } --}() -+ storeInstance := &store{ -+ backupIndexWriter: backupIndexWriter, -+ } -+ err := storeInstance.init(indexShipperCfg, objectClient, limits, tableRanges, reg) -+ if err != nil { -+ return nil, nil, err -+ } -+ return storeInstance, storeInstance.Stop, nil -+} - - func (s *store) init(indexShipperCfg indexshipper.Config, objectClient client.ObjectClient, - limits downloads.Limits, tableRanges config.TableRanges, reg prometheus.Registerer) error {",Loki,"Remove global singleton of the tsdb.Store and keep it scoped to the storage.Store (#8928) - -**What this PR does / why we need it**: - -I made this change to fix an issue with the cmd/migrate tool which I'm -also sneaking a few fixes on in this PR too. - -We had a global singleton of the tsdb.Store which was leading to the -migrate tool reading from and writing to the `source` storage.Store -because it created the singleton with an object store client only -pointing to the source store. - -This PR changes the scoping of that singleton to not be global but -instead local to each instance of storage.Store - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [ ] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md` - -Signed-off-by: Edward Welch " -c88a3688e0050ea529190e15053f41e57f39253c,2023-06-24 02:37:06,Ward Loos,"Helm - Loki: Fix empty starting line in runtimeConfig (#8657) - -**What this PR does / why we need it**: - -The `runtime` ConfigMap in the Loki Helm chart (introduced in -https://github.com/grafana/loki/commit/7aa596752d33e8a0445aec59c23642b83baf4328) -starts with an empty line because it improperly removes the current -line. - -```yaml ---- -# Source: loki/templates/runtime-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: loki-runtime -data: - runtime-config.yaml: | - - {} -``` - -While this generates technically valid YAML, it looks weird and -interacts badly with some tools. 
E.g.: - -- Python ruamel.yaml library: -https://stackoverflow.com/questions/75584262/ruamel-yaml-adds-incorrect-indentation-indicator -- VS Code which is set to trim whitespace on save (yellow highlighting -is for objects, not strings): - - - -This PR removes this empty start line: - -```yaml ---- -# Source: loki/templates/runtime-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: loki-runtime -data: - runtime-config.yaml: | - {} -``` - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - - - -**Checklist** -- [X] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`",False,"diff --git a/production/helm/loki/templates/runtime-configmap.yaml b/production/helm/loki/templates/runtime-configmap.yaml -index 4463aee17f70c..2f38193da615d 100644 ---- a/production/helm/loki/templates/runtime-configmap.yaml -+++ b/production/helm/loki/templates/runtime-configmap.yaml -@@ -7,4 +7,4 @@ metadata: - {{- include ""loki.labels"" . | nindent 4 }} - data: - runtime-config.yaml: | -- {{ tpl (toYaml .Values.loki.runtimeConfig) . | nindent 4 }} -+ {{- tpl (toYaml .Values.loki.runtimeConfig) . | nindent 4 }}",unknown,"Helm - Loki: Fix empty starting line in runtimeConfig (#8657) - -**What this PR does / why we need it**: - -The `runtime` ConfigMap in the Loki Helm chart (introduced in -https://github.com/grafana/loki/commit/7aa596752d33e8a0445aec59c23642b83baf4328) -starts with an empty line because it improperly removes the current -line. - -```yaml ---- -# Source: loki/templates/runtime-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: loki-runtime -data: - runtime-config.yaml: | - - {} -``` - -While this generates technically valid YAML, it looks weird and -interacts badly with some tools. E.g.: - -- Python ruamel.yaml library: -https://stackoverflow.com/questions/75584262/ruamel-yaml-adds-incorrect-indentation-indicator -- VS Code which is set to trim whitespace on save (yellow highlighting -is for objects, not strings): - - - -This PR removes this empty start line: - -```yaml ---- -# Source: loki/templates/runtime-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: loki-runtime -data: - runtime-config.yaml: | - {} -``` - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - - - -**Checklist** -- [X] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [ ] `CHANGELOG.md` updated -- [ ] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/upgrading/_index.md`" -90cc715c3a7577d0c582aa1a832f02275520e9b4,2021-11-29 19:26:54,Danny Kopping,"Allow for setting of per-tenant runtime configs (#4840) - -Signed-off-by: Danny Kopping ",False,"diff --git a/production/ksonnet/loki/overrides.libsonnet b/production/ksonnet/loki/overrides.libsonnet -index f985c31b0e2ce..10260b12dcc53 100644 ---- a/production/ksonnet/loki/overrides.libsonnet -+++ b/production/ksonnet/loki/overrides.libsonnet -@@ -3,7 +3,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; - { - _config+: { - overrides: { -- // insert tenant overrides here. 
see https://github.com/grafana/loki/tree/master/docs/configuration#limits_config -+ // insert tenant overrides here. see https://grafana.com/docs/loki/latest/configuration/#limits_config - // - // 'tenant_x': { - // ingestion_rate_strategy: 'global', -@@ -15,6 +15,16 @@ local k = import 'ksonnet-util/kausal.libsonnet'; - // max_query_parallelism: 32, - // }, - }, -+ -+ runtimeConfigs: { -+ // insert runtime configs here. see pkg/runtime/config.go -+ // -+ // tenant_x: { -+ // log_stream_creation: true, -+ // log_push_request: true, -+ // log_push_request_streams: true, -+ // }, -+ }, - }, - local configMap = k.core.v1.configMap, - -@@ -24,6 +34,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; - 'overrides.yaml': k.util.manifestYaml( - { - overrides: $._config.overrides, -+ configs: $._config.runtimeConfigs, - } - ), - }),",unknown,"Allow for setting of per-tenant runtime configs (#4840) - -Signed-off-by: Danny Kopping " -0d505bb4408e407aa903e6c03c6c51207b5a3ca7,2024-03-05 22:15:20,Zirko,"fix: fix backend-world-egress CNP (#12128) - -Signed-off-by: QuantumEnigmaa ",False,"diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md -index 6ccc3eacd9949..85dc08d592feb 100644 ---- a/production/helm/loki/CHANGELOG.md -+++ b/production/helm/loki/CHANGELOG.md -@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang - - [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) - -+## 5.43.5 -+ -+- [BUGFIX] Add `---` before the `backend-kubeapiserver-egress` ciliumnetworkpolicy to prevent the `backend-world-egress` one from being dumped if both are enabled. -+ - ## 5.43.4 - - - [ENHANCEMENT] Add `ciliumnetworkpolicies` with egress to world for write, read and backend. 
-diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml -index 5a752f0450856..3136c207e82a1 100644 ---- a/production/helm/loki/Chart.yaml -+++ b/production/helm/loki/Chart.yaml -@@ -3,7 +3,7 @@ name: loki - description: Helm chart for Grafana Loki in simple, scalable mode - type: application - appVersion: 2.9.4 --version: 5.43.4 -+version: 5.43.5 - home: https://grafana.github.io/helm-charts - sources: - - https://github.com/grafana/loki -diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md -index d2a946e1b9bc4..efc00aee09e23 100644 ---- a/production/helm/loki/README.md -+++ b/production/helm/loki/README.md -@@ -1,6 +1,6 @@ - # loki - --![Version: 5.43.4](https://img.shields.io/badge/Version-5.43.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square) -+![Version: 5.43.5](https://img.shields.io/badge/Version-5.43.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square) - - Helm chart for Grafana Loki in simple, scalable mode - -diff --git a/production/helm/loki/templates/ciliumnetworkpolicy.yaml b/production/helm/loki/templates/ciliumnetworkpolicy.yaml -index db83e83202eec..fb7b77386bb0b 100644 ---- a/production/helm/loki/templates/ciliumnetworkpolicy.yaml -+++ b/production/helm/loki/templates/ciliumnetworkpolicy.yaml -@@ -187,6 +187,7 @@ spec: - {{- end }} - - {{- if .Values.networkPolicy.egressKubeApiserver.enabled }} -+--- - apiVersion: ""cilium.io/v2"" - kind: CiliumNetworkPolicy - metadata:",fix,"fix backend-world-egress CNP (#12128) - -Signed-off-by: QuantumEnigmaa " -2ef5e6faf9f6e86660d7452dfe712e1260d98f5a,2021-12-29 23:13:33,Dylan Guedes,"Move `min_sharding_lookback` to the right place. (#5007) - -- This configuration is actually inside the limits section -- We shouldn't change the CLI flag to something different to not break - existing deployments",False,"diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md -index e95ac0797d98e..4f2a77e0422c2 100644 ---- a/docs/sources/configuration/_index.md -+++ b/docs/sources/configuration/_index.md -@@ -378,13 +378,6 @@ The `query_range` block configures query splitting and caching in the Loki query - # CLI flag: -querier.split-queries-by-interval - [split_queries_by_interval: | default = 0s] - --# Limit queries that can be sharded. --# Queries within the time range of now and now minus this sharding lookback --# are not sharded. The default value of 0s disables the lookback, causing --# sharding of all queries at all times. --# CLI flag: -frontend.min-sharding-lookback --[min_sharding_lookback: | default = 0s] -- - # Deprecated: Split queries by day and execute in parallel. - # Use -querier.split-queries-by-interval instead. - # CLI flag: -querier.split-queries-by-day -@@ -2160,6 +2153,13 @@ The `limits_config` block configures global and per-tenant limits in Loki. - # Retry upon receiving a 429 status code from the remote-write storage. - # This is experimental and might change in the future. - [ruler_remote_write_queue_retry_on_ratelimit: ] -+ -+# Limit queries that can be sharded. -+# Queries within the time range of now and now minus this sharding lookback -+# are not sharded. 
The default value of 0s disables the lookback, causing -+# sharding of all queries at all times. -+# CLI flag: -frontend.min-sharding-lookback -+[min_sharding_lookback: | default = 0s] - ``` - - ### grpc_client_config",unknown,"Move `min_sharding_lookback` to the right place. (#5007) - -- This configuration is actually inside the limits section -- We shouldn't change the CLI flag to something different to not break - existing deployments" -b8221dc68fb9b4a6a4f8689dedb8ed4e76a5d10d,2023-03-23 00:17:01,J Stickler,"Correcting typos merged in #8870. (#8873) - -**Which issue(s) this PR fixes**: -Fixes typos introduced in #8870",False,"diff --git a/docs/sources/clients/aws/ec2/_index.md b/docs/sources/clients/aws/ec2/_index.md -index a1b774d1be768..b653737b338bb 100644 ---- a/docs/sources/clients/aws/ec2/_index.md -+++ b/docs/sources/clients/aws/ec2/_index.md -@@ -8,7 +8,7 @@ In this tutorial we're going to setup [Promtail]({{< relref ""../../promtail/"" >} - - - --- [Running Promtail on AWS EC2](#ec2) -+- [EC2](#ec2) - - [Requirements](#requirements) - - [Creating an EC2 instance](#creating-an-ec2-instance) - - [Setting up Promtail](#setting-up-promtail) -@@ -156,8 +156,8 @@ Finally the [`relabeling_configs`][relabel] section has three purposes: - - 2. Choosing where Promtail should find log files to tail, in our example we want to include all log files that exist in `/var/log` using the glob `/var/log/**.log`. If you need to use multiple glob, you can simply add another job in your `scrape_configs`. - --3. Ensuring discovered targets are only for the machine Promtail currently runs on. This is achieve by adding the label `__host__` using the incoming metadata `__meta_ec2_private_dns_name`. If it doesn't match the current `HOSTNAME` environnement variable, the target will be dropped. --If `__meta_ec2_private_dns_name` doesn't match your instance's hosname (on EC2 Windows instance for example, where it is the IP address and not the hostname), you can hardcode the hotname at this stage, or check if any of the instances tag contain the hostname (`__meta_ec2_tag_: each tag value of the instance`) -+3. Ensuring discovered targets are only for the machine Promtail currently runs on. This is achieved by adding the label `__host__` using the incoming metadata `__meta_ec2_private_dns_name`. If it doesn't match the current `HOSTNAME` environment variable, the target will be dropped. -+If `__meta_ec2_private_dns_name` doesn't match your instance's hostname (on EC2 Windows instance for example, where it is the IP address and not the hostname), you can hardcode the hostname at this stage, or check if any of the instances tag contain the hostname (`__meta_ec2_tag_: each tag value of the instance`) - - Alright we should be ready to fire up Promtail, we're going to run it using the flag `--dry-run`. This is perfect to ensure everything is correctly, specially when you're still playing around with the configuration. Don't worry when using this mode, Promtail won't send any logs and won't remember any file positions.",unknown,"Correcting typos merged in #8870. (#8873) - -**Which issue(s) this PR fixes**: -Fixes typos introduced in #8870" -b830d65bdde8a56076100253e55b34e7b6cc01fa,2021-07-09 00:59:31,Nick Pillitteri,"Use the Cortex wrapper for getting tenant ID from a context (#3973) - -This change replaces all uses of the weaveworks/common method -for getting tenant/org ID with the Cortex wrapper which performs -some extra validation. 
- -Signed-off-by: Nick Pillitteri ",False,"diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget.go b/clients/pkg/promtail/targets/lokipush/pushtarget.go -index 42eabf94f31fb..d1008cf7f2798 100644 ---- a/clients/pkg/promtail/targets/lokipush/pushtarget.go -+++ b/clients/pkg/promtail/targets/lokipush/pushtarget.go -@@ -7,6 +7,7 @@ import ( - ""strings"" - ""time"" - -+ ""github.com/cortexproject/cortex/pkg/tenant"" - util_log ""github.com/cortexproject/cortex/pkg/util/log"" - ""github.com/go-kit/kit/log"" - ""github.com/go-kit/kit/log/level"" -@@ -16,7 +17,6 @@ import ( - ""github.com/prometheus/prometheus/pkg/relabel"" - promql_parser ""github.com/prometheus/prometheus/promql/parser"" - ""github.com/weaveworks/common/server"" -- ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/clients/pkg/promtail/api"" - ""github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"" -@@ -106,7 +106,7 @@ func (t *PushTarget) run() error { - - func (t *PushTarget) handle(w http.ResponseWriter, r *http.Request) { - logger := util_log.WithContext(r.Context(), util_log.Logger) -- userID, _ := user.ExtractOrgID(r.Context()) -+ userID, _ := tenant.TenantID(r.Context()) - req, err := push.ParseRequest(logger, userID, r, nil) - if err != nil { - level.Warn(t.logger).Log(""msg"", ""failed to parse incoming push request"", ""err"", err.Error()) -diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go -index 2147bd0a81e03..7b3efebd8bda2 100644 ---- a/cmd/migrate/main.go -+++ b/cmd/migrate/main.go -@@ -13,6 +13,7 @@ import ( - ""time"" - - cortex_storage ""github.com/cortexproject/cortex/pkg/chunk/storage"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - util_log ""github.com/cortexproject/cortex/pkg/util/log"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/common/model"" -@@ -134,7 +135,7 @@ func main() { - ctx := context.Background() - // This is a little weird but it was the easiest way to guarantee the userID is in the right format - ctx = user.InjectOrgID(ctx, *source) -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - panic(err) - } -diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index 80b1f51350f6c..6fc896ac0f51e 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -9,6 +9,7 @@ import ( - cortex_distributor ""github.com/cortexproject/cortex/pkg/distributor"" - ""github.com/cortexproject/cortex/pkg/ring"" - ring_client ""github.com/cortexproject/cortex/pkg/ring/client"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/cortexproject/cortex/pkg/util/limiter"" - util_log ""github.com/cortexproject/cortex/pkg/util/log"" - ""github.com/cortexproject/cortex/pkg/util/services"" -@@ -191,7 +192,7 @@ type pushTracker struct { - - // Push a set of streams. 
- func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*logproto.PushResponse, error) { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } -diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go -index 880dce10676b0..70a87de5e045b 100644 ---- a/pkg/distributor/http.go -+++ b/pkg/distributor/http.go -@@ -4,10 +4,10 @@ import ( - ""net/http"" - ""strings"" - -+ ""github.com/cortexproject/cortex/pkg/tenant"" - util_log ""github.com/cortexproject/cortex/pkg/util/log"" - ""github.com/go-kit/kit/log/level"" - ""github.com/weaveworks/common/httpgrpc"" -- ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/loghttp/push"" - ) -@@ -15,7 +15,7 @@ import ( - // PushHandler reads a snappy-compressed proto from the HTTP body. - func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { - logger := util_log.WithContext(r.Context(), util_log.Logger) -- userID, _ := user.ExtractOrgID(r.Context()) -+ userID, _ := tenant.TenantID(r.Context()) - req, err := push.ParseRequest(logger, userID, r, d.tenantsRetention) - if err != nil { - if d.tenantConfigs.LogPushRequest(userID) { -diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go -index 883690cbcca08..d46727aa1d077 100644 ---- a/pkg/ingester/flush.go -+++ b/pkg/ingester/flush.go -@@ -9,6 +9,10 @@ import ( - - ""golang.org/x/net/context"" - -+ ""github.com/cortexproject/cortex/pkg/chunk"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" -+ ""github.com/cortexproject/cortex/pkg/util"" -+ util_log ""github.com/cortexproject/cortex/pkg/util/log"" - ""github.com/go-kit/kit/log/level"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/client_golang/prometheus/promauto"" -@@ -16,10 +20,6 @@ import ( - ""github.com/prometheus/prometheus/pkg/labels"" - ""github.com/weaveworks/common/user"" - -- ""github.com/cortexproject/cortex/pkg/chunk"" -- ""github.com/cortexproject/cortex/pkg/util"" -- util_log ""github.com/cortexproject/cortex/pkg/util/log"" -- - ""github.com/grafana/loki/pkg/chunkenc"" - loki_util ""github.com/grafana/loki/pkg/util"" - ) -@@ -335,7 +335,7 @@ func (i *Ingester) removeFlushedChunks(instance *instance, stream *stream, mayRe - } - - func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs labels.Labels, cs []*chunkDesc, chunkMtx sync.Locker) error { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return err - } -diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go -index 3719e9988499b..9daf77b651aae 100644 ---- a/pkg/ingester/flush_test.go -+++ b/pkg/ingester/flush_test.go -@@ -19,6 +19,7 @@ import ( - ""github.com/cortexproject/cortex/pkg/chunk"" - ""github.com/cortexproject/cortex/pkg/ring"" - ""github.com/cortexproject/cortex/pkg/ring/kv"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/cortexproject/cortex/pkg/util/flagext"" - ""github.com/cortexproject/cortex/pkg/util/services"" - -@@ -297,7 +298,7 @@ func (s *testStore) Put(ctx context.Context, chunks []chunk.Chunk) error { - if s.onPut != nil { - return s.onPut(ctx, chunks) - } -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return err - } -diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go -index ff80d96731c3d..f612da626a816 100644 ---- a/pkg/ingester/ingester.go -+++ b/pkg/ingester/ingester.go -@@ -11,6 +11,7 @@ import ( - - 
""github.com/cortexproject/cortex/pkg/chunk"" - ""github.com/cortexproject/cortex/pkg/ring"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/cortexproject/cortex/pkg/util"" - util_log ""github.com/cortexproject/cortex/pkg/util/log"" - ""github.com/cortexproject/cortex/pkg/util/services"" -@@ -20,7 +21,6 @@ import ( - ""github.com/prometheus/client_golang/prometheus/promauto"" - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/pkg/labels"" -- ""github.com/weaveworks/common/user"" - ""google.golang.org/grpc/health/grpc_health_v1"" - - ""github.com/grafana/loki/pkg/chunkenc"" -@@ -387,7 +387,7 @@ func (i *Ingester) ShutdownHandler(w http.ResponseWriter, r *http.Request) { - - // Push implements logproto.Pusher. - func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logproto.PushResponse, error) { -- instanceID, err := user.ExtractOrgID(ctx) -+ instanceID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } else if i.readonly { -@@ -421,7 +421,7 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie - ctx := stats.NewContext(queryServer.Context()) - defer stats.SendAsTrailer(ctx, queryServer) - -- instanceID, err := user.ExtractOrgID(ctx) -+ instanceID, err := tenant.TenantID(ctx) - if err != nil { - return err - } -@@ -462,7 +462,7 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log - ctx := stats.NewContext(queryServer.Context()) - defer stats.SendAsTrailer(ctx, queryServer) - -- instanceID, err := user.ExtractOrgID(ctx) -+ instanceID, err := tenant.TenantID(ctx) - if err != nil { - return err - } -@@ -516,7 +516,7 @@ func (i *Ingester) boltdbShipperMaxLookBack() time.Duration { - - // GetChunkIDs is meant to be used only when using an async store like boltdb-shipper. - func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) { -- orgID, err := user.ExtractOrgID(ctx) -+ orgID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } -@@ -555,12 +555,12 @@ func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsReq - - // Label returns the set of labels for the stream this ingester knows about. 
- func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { -- instanceID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - -- instance := i.getOrCreateInstance(instanceID) -+ instance := i.getOrCreateInstance(userID) - resp, err := instance.Label(ctx, req) - if err != nil { - return nil, err -@@ -579,11 +579,6 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp - return resp, nil - } - -- userID, err := user.ExtractOrgID(ctx) -- if err != nil { -- return nil, err -- } -- - maxLookBackPeriod := i.cfg.QueryStoreMaxLookBackPeriod - if boltdbShipperMaxLookBack != 0 { - maxLookBackPeriod = boltdbShipperMaxLookBack -@@ -615,7 +610,7 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp - - // Series queries the ingester for log stream identifiers (label sets) matching a set of matchers - func (i *Ingester) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) { -- instanceID, err := user.ExtractOrgID(ctx) -+ instanceID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } -@@ -671,7 +666,7 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_ - default: - } - -- instanceID, err := user.ExtractOrgID(queryServer.Context()) -+ instanceID, err := tenant.TenantID(queryServer.Context()) - if err != nil { - return err - } -@@ -691,7 +686,7 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_ - - // TailersCount returns count of active tail requests from a user - func (i *Ingester) TailersCount(ctx context.Context, in *logproto.TailersCountRequest) (*logproto.TailersCountResponse, error) { -- instanceID, err := user.ExtractOrgID(ctx) -+ instanceID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } -diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go -index 860eb1ae89561..0a8a5d9c677c5 100644 ---- a/pkg/ingester/ingester_test.go -+++ b/pkg/ingester/ingester_test.go -@@ -11,6 +11,7 @@ import ( - ""github.com/prometheus/prometheus/pkg/labels"" - - ""github.com/cortexproject/cortex/pkg/chunk"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/cortexproject/cortex/pkg/util/flagext"" - ""github.com/cortexproject/cortex/pkg/util/services"" - ""github.com/stretchr/testify/require"" -@@ -260,7 +261,7 @@ func (s *mockStore) Put(ctx context.Context, chunks []chunk.Chunk) error { - s.mtx.Lock() - defer s.mtx.Unlock() - -- userid, err := user.ExtractOrgID(ctx) -+ userid, err := tenant.TenantID(ctx) - if err != nil { - return err - } -diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go -index e36d551ba824a..ddf04d01541e6 100644 ---- a/pkg/logql/engine.go -+++ b/pkg/logql/engine.go -@@ -8,6 +8,7 @@ import ( - ""sort"" - ""time"" - -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/cortexproject/cortex/pkg/util/spanlogger"" - ""github.com/go-kit/kit/log/level"" - ""github.com/prometheus/client_golang/prometheus"" -@@ -15,7 +16,6 @@ import ( - ""github.com/prometheus/prometheus/pkg/labels"" - ""github.com/prometheus/prometheus/promql"" - promql_parser ""github.com/prometheus/prometheus/promql/parser"" -- ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/iter"" - ""github.com/grafana/loki/pkg/logproto"" -@@ -177,7 +177,7 @@ func (q *query) evalSample(ctx context.Context, expr SampleExpr) (promql_parser. 
- return q.evalLiteral(ctx, lit) - } - -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } -diff --git a/pkg/querier/http.go b/pkg/querier/http.go -index 73ed9f2642406..ccb572ea9636a 100644 ---- a/pkg/querier/http.go -+++ b/pkg/querier/http.go -@@ -5,13 +5,13 @@ import ( - ""net/http"" - ""time"" - -+ ""github.com/cortexproject/cortex/pkg/tenant"" - util_log ""github.com/cortexproject/cortex/pkg/util/log"" - ""github.com/go-kit/kit/log/level"" - ""github.com/gorilla/websocket"" - ""github.com/prometheus/prometheus/pkg/labels"" - ""github.com/prometheus/prometheus/promql/parser"" - ""github.com/weaveworks/common/httpgrpc"" -- ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/loghttp"" - loghttp_legacy ""github.com/grafana/loki/pkg/loghttp/legacy"" -@@ -347,7 +347,7 @@ func parseRegexQuery(httpRequest *http.Request) (string, error) { - } - - func (q *Querier) validateEntriesLimits(ctx context.Context, query string, limit uint32) error { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } -diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go -index 2c6945b25ceba..f3edaa9cf7b70 100644 ---- a/pkg/querier/querier.go -+++ b/pkg/querier/querier.go -@@ -6,14 +6,14 @@ import ( - ""net/http"" - ""time"" - -- ""github.com/go-kit/kit/log/level"" - ""github.com/prometheus/common/model"" - ""github.com/weaveworks/common/httpgrpc"" -- ""github.com/weaveworks/common/user"" - ""google.golang.org/grpc/health/grpc_health_v1"" - -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/cortexproject/cortex/pkg/util/spanlogger"" - cortex_validation ""github.com/cortexproject/cortex/pkg/util/validation"" -+ ""github.com/go-kit/kit/log/level"" - - ""github.com/grafana/loki/pkg/iter"" - ""github.com/grafana/loki/pkg/loghttp"" -@@ -254,7 +254,7 @@ func (q *Querier) buildQueryIntervals(queryStart, queryEnd time.Time) (*interval - - // Label does the heavy lifting for a Label query. 
- func (q *Querier) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } -@@ -358,7 +358,7 @@ func (q *Querier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, - - // Series fetches any matching series for a list of matcher sets - func (q *Querier) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } -@@ -484,7 +484,7 @@ func (q *Querier) seriesForMatcher(ctx context.Context, from, through time.Time, - } - - func (q *Querier) validateQueryRequest(ctx context.Context, req logql.QueryParams) (time.Time, time.Time, error) { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return time.Time{}, time.Time{}, err - } -@@ -532,7 +532,7 @@ func validateQueryTimeRangeLimits(ctx context.Context, userID string, limits tim - } - - func (q *Querier) checkTailRequestLimit(ctx context.Context) error { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return err - } -diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go -index 91f4bdf73bac0..afb52c29a5915 100644 ---- a/pkg/querier/queryrange/limits.go -+++ b/pkg/querier/queryrange/limits.go -@@ -9,6 +9,7 @@ import ( - - ""github.com/cortexproject/cortex/pkg/cortexpb"" - ""github.com/cortexproject/cortex/pkg/querier/queryrange"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/opentracing/opentracing-go"" - ""github.com/weaveworks/common/httpgrpc"" - ""github.com/weaveworks/common/user"" -@@ -200,7 +201,7 @@ func (rt limitedRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) - if span := opentracing.SpanFromContext(ctx); span != nil { - request.LogToSpan(span) - } -- userid, err := user.ExtractOrgID(ctx) -+ userid, err := tenant.TenantID(ctx) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } -diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go -index bfe6e41a7fa9a..758c576c7b3b7 100644 ---- a/pkg/querier/queryrange/roundtrip.go -+++ b/pkg/querier/queryrange/roundtrip.go -@@ -10,11 +10,11 @@ import ( - ""github.com/cortexproject/cortex/pkg/chunk"" - ""github.com/cortexproject/cortex/pkg/chunk/cache"" - ""github.com/cortexproject/cortex/pkg/querier/queryrange"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/go-kit/kit/log"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/prometheus/pkg/labels"" - ""github.com/weaveworks/common/httpgrpc"" -- ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/loghttp"" - ""github.com/grafana/loki/pkg/logql"" -@@ -176,7 +176,7 @@ func transformRegexQuery(req *http.Request, expr logql.LogSelectorExpr) (logql.L - - // validates log entries limits - func validateLimits(req *http.Request, reqLimit uint32, limits Limits) error { -- userID, err := user.ExtractOrgID(req.Context()) -+ userID, err := tenant.TenantID(req.Context()) - if err != nil { - return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } -diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go -index 18947d3673071..33919ace9ff43 100644 ---- a/pkg/querier/queryrange/split_by_interval.go -+++ 
b/pkg/querier/queryrange/split_by_interval.go -@@ -6,12 +6,12 @@ import ( - ""time"" - - ""github.com/cortexproject/cortex/pkg/querier/queryrange"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/opentracing/opentracing-go"" - otlog ""github.com/opentracing/opentracing-go/log"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/client_golang/prometheus/promauto"" - ""github.com/weaveworks/common/httpgrpc"" -- ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/logproto"" - ) -@@ -159,7 +159,7 @@ func (h *splitByInterval) loop(ctx context.Context, ch <-chan *lokiResult, next - } - - func (h *splitByInterval) Do(ctx context.Context, r queryrange.Request) (queryrange.Response, error) { -- userid, err := user.ExtractOrgID(ctx) -+ userid, err := tenant.TenantID(ctx) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } -diff --git a/pkg/storage/store.go b/pkg/storage/store.go -index 42b839586864c..4c12e83de4e5c 100644 ---- a/pkg/storage/store.go -+++ b/pkg/storage/store.go -@@ -11,13 +11,13 @@ import ( - cortex_local ""github.com/cortexproject/cortex/pkg/chunk/local"" - ""github.com/cortexproject/cortex/pkg/chunk/storage"" - ""github.com/cortexproject/cortex/pkg/querier/astmapper"" -+ ""github.com/cortexproject/cortex/pkg/tenant"" - ""github.com/cortexproject/cortex/pkg/util/flagext"" - ""github.com/go-kit/kit/log"" - ""github.com/go-kit/kit/log/level"" - ""github.com/prometheus/client_golang/prometheus"" - ""github.com/prometheus/common/model"" - ""github.com/prometheus/prometheus/pkg/labels"" -- ""github.com/weaveworks/common/user"" - - ""github.com/grafana/loki/pkg/iter"" - ""github.com/grafana/loki/pkg/logproto"" -@@ -196,7 +196,7 @@ func (s *store) SetChunkFilterer(chunkFilterer RequestChunkFilterer) { - - // lazyChunks is an internal function used to resolve a set of lazy chunks from the store without actually loading them. It's used internally by `LazyQuery` and `GetSeries` - func (s *store) lazyChunks(ctx context.Context, matchers []*labels.Matcher, from, through model.Time) ([]*LazyChunk, error) { -- userID, err := user.ExtractOrgID(ctx) -+ userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - }",unknown,"Use the Cortex wrapper for getting tenant ID from a context (#3973) - -This change replaces all uses of the weaveworks/common method -for getting tenant/org ID with the Cortex wrapper which performs -some extra validation. 
- -Signed-off-by: Nick Pillitteri " -adeff793a222e786bc62758a9d96bf314b3e3d03,2024-11-03 00:22:42,Paul Rogers,chore: Update lambda promtail nix checksum (#14745),False,"diff --git a/nix/packages/loki.nix b/nix/packages/loki.nix -index deab8e6b1307d..ebfb467ccda13 100644 ---- a/nix/packages/loki.nix -+++ b/nix/packages/loki.nix -@@ -5,7 +5,7 @@ let - pname = ""lambda-promtail""; - - src = ./../../tools/lambda-promtail; -- vendorHash = ""sha256-zEN42vbw4mWtU8KOUi9ZSQiFoRJnH7C04aaZ2wCtA/o=""; -+ vendorHash = ""sha256-GM7tXlN7t7c3EHtF4vMzhINIZm+HQpyMYVAv3Dtiny8=""; - - doCheck = false;",chore,Update lambda promtail nix checksum (#14745) -3431580f72d37fc9139e2c65ee3ac0c2a9789a4b,2025-03-05 01:06:09,renovate[bot],"chore(deps): update dependency typescript to v5.8.2 (main) (#16539) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json -index 41ead757ef4ef..5e980d500e2c1 100644 ---- a/pkg/ui/frontend/package-lock.json -+++ b/pkg/ui/frontend/package-lock.json -@@ -6397,9 +6397,9 @@ - } - }, - ""node_modules/typescript"": { -- ""version"": ""5.7.3"", -- ""resolved"": ""https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz"", -- ""integrity"": ""sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw=="", -+ ""version"": ""5.8.2"", -+ ""resolved"": ""https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz"", -+ ""integrity"": ""sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ=="", - ""dev"": true, - ""license"": ""Apache-2.0"", - ""bin"": {",chore,"update dependency typescript to v5.8.2 (main) (#16539) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -f1bbdc51dcfbc4294b4d7ea3802e78189b0af1b2,2023-08-30 19:59:58,Shantanu Alshi,"Remove split_queries_by_interval and forward_headers_list (#10395) - -…range config -Removed split_queries_by_interval and forward_headers_list from -query_range config - -The fields were deprecated and hence going to be removed in the next -major version - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [x] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md -index c6ec019b8db34..f979ef5992240 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -67,6 +67,7 @@ - * [10073](https://github.com/grafana/loki/pull/10073) **sandeepsukhani,salvacorts,vlad-diachenko** Support attaching non-indexed labels to log lines. 
- * [10378](https://github.com/grafana/loki/pull/10378) **shantanualsi** Remove deprecated `ruler.wal-cleaer.period` - * [10380](https://github.com/grafana/loki/pull/10380) **shantanualsi** Remove `experimental.ruler.enable-api` in favour of `ruler.enable-api` -+* [10395](https://github.com/grafana/loki/pull/10395/) **shantanualshi** Remove deprecated split_queries_by_interval and forward_headers_list configs in query_range - - ##### Fixes - -diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md -index f119f89778235..462bdcfb3f1dd 100644 ---- a/docs/sources/configure/_index.md -+++ b/docs/sources/configure/_index.md -@@ -775,10 +775,6 @@ The `frontend` block configures the Loki query-frontend. - The `query_range` block configures the query splitting and caching in the Loki query-frontend. - - ```yaml --# Deprecated: Use -querier.split-queries-by-interval instead. CLI flag: --# -querier.split-queries-by-day. Split queries by day and execute in parallel. --[split_queries_by_interval: ] -- - # Mutate incoming queries to align their start and end with their step. - # CLI flag: -querier.align-querier-with-step - [align_queries_with_step: | default = false] -@@ -807,11 +803,6 @@ results_cache: - # CLI flag: -querier.parallelise-shardable-queries - [parallelise_shardable_queries: | default = true] - --# Deprecated. List of headers forwarded by the query Frontend to downstream --# querier. --# CLI flag: -frontend.forward-headers-list --[forward_headers_list: | default = []] -- - # The downstream querier is required to answer in the accepted format. Can be - # 'json' or 'protobuf'. Note: Both will still be routed over GRPC. - # CLI flag: -frontend.required-query-response-format -diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md -index f6c5faafc81bd..5dc44c938349e 100644 ---- a/docs/sources/setup/upgrade/_index.md -+++ b/docs/sources/setup/upgrade/_index.md -@@ -102,6 +102,8 @@ You can use `--keep-empty` flag to retain them. - 3. `s3.sse-encryption` is removed. AWS now defaults encryption of all buckets to SSE-S3. Use `sse.type` to set SSE type. - 4. `ruler.wal-cleaer.period` is removed. Use `ruler.wal-cleaner.period` instead. - 5. `experimental.ruler.enable-api` is removed. Use `ruler.enable-api` instead. -+6. `split_queries_by_interval` is removed from `query_range` YAML section. You can instead configure it in [Limits Config](/docs/loki/latest/configuration/#limits_config). -+7. `frontend.forward-headers-list` CLI flag and its corresponding YAML setting are removed. - - ### Jsonnet - -diff --git a/pkg/querier/queryrange/queryrangebase/roundtrip.go b/pkg/querier/queryrange/queryrangebase/roundtrip.go -index fc10f3abc75a1..3cfb7ab849a8a 100644 ---- a/pkg/querier/queryrange/queryrangebase/roundtrip.go -+++ b/pkg/querier/queryrange/queryrangebase/roundtrip.go -@@ -22,7 +22,6 @@ import ( - ""net/http"" - ""time"" - -- ""github.com/grafana/dskit/flagext"" - ""github.com/grafana/dskit/httpgrpc"" - ""github.com/grafana/dskit/user"" - ""github.com/opentracing/opentracing-go"" -@@ -38,20 +37,12 @@ var PassthroughMiddleware = MiddlewareFunc(func(next Handler) Handler { - - // Config for query_range middleware chain. - type Config struct { -- // Deprecated: SplitQueriesByInterval will be removed in the next major release -- SplitQueriesByInterval time.Duration `yaml:""split_queries_by_interval"" doc:""deprecated|description=Use -querier.split-queries-by-interval instead. CLI flag: -querier.split-queries-by-day. 
Split queries by day and execute in parallel.""` -- - AlignQueriesWithStep bool `yaml:""align_queries_with_step""` - ResultsCacheConfig ResultsCacheConfig `yaml:""results_cache""` - CacheResults bool `yaml:""cache_results""` - MaxRetries int `yaml:""max_retries""` - ShardedQueries bool `yaml:""parallelise_shardable_queries""` - -- // TODO(karsten): remove used option ForwardHeaders with Loki 3.0 since -- // it's a breaking change. -- // List of headers which query_range middleware chain would forward to downstream querier. -- ForwardHeaders flagext.StringSlice `yaml:""forward_headers_list""` -- - // Required format for querier responses - RequiredQueryResponseFormat string `yaml:""required_query_response_format""` - } -@@ -62,7 +53,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.BoolVar(&cfg.AlignQueriesWithStep, ""querier.align-querier-with-step"", false, ""Mutate incoming queries to align their start and end with their step."") - f.BoolVar(&cfg.CacheResults, ""querier.cache-results"", false, ""Cache query results."") - f.BoolVar(&cfg.ShardedQueries, ""querier.parallelise-shardable-queries"", true, ""Perform query parallelisations based on storage sharding configuration and query ASTs. This feature is supported only by the chunks storage engine."") -- f.Var(&cfg.ForwardHeaders, ""frontend.forward-headers-list"", ""Deprecated. List of headers forwarded by the query Frontend to downstream querier."") - - f.StringVar(&cfg.RequiredQueryResponseFormat, ""frontend.required-query-response-format"", ""json"", ""The downstream querier is required to answer in the accepted format. Can be 'json' or 'protobuf'. Note: Both will still be routed over GRPC."") - -@@ -71,9 +61,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - - // Validate validates the config. - func (cfg *Config) Validate() error { -- if cfg.SplitQueriesByInterval != 0 { -- return errors.New(""the yaml flag `split_queries_by_interval` must now be set in the `limits_config` section instead of the `query_range` config section"") -- } - if cfg.CacheResults { - if err := cfg.ResultsCacheConfig.Validate(); err != nil { - return errors.Wrap(err, ""invalid results_cache config"")",unknown,"Remove split_queries_by_interval and forward_headers_list (#10395) - -…range config -Removed split_queries_by_interval and forward_headers_list from -query_range config - -The fields were deprecated and hence going to be removed in the next -major version - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - -**Checklist** -- [x] Reviewed the -[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) -guide (**required**) -- [ ] Documentation added -- [ ] Tests updated -- [x] `CHANGELOG.md` updated -- [ ] If the change is worth mentioning in the release notes, add -`add-to-release-notes` label -- [x] Changes that require user attention or interaction to upgrade are -documented in `docs/sources/setup/upgrade/_index.md` -- [ ] For Helm chart changes bump the Helm chart version in -`production/helm/loki/Chart.yaml` and update -`production/helm/loki/CHANGELOG.md` and -`production/helm/loki/README.md`. [Example -PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)" -6a473a8407a80f33cfd022fdf8fb5d3875815648,2023-01-24 03:10:56,J Stickler,[Docs] Adding context to Istio installation. 
(#8248),False,"diff --git a/docs/sources/fundamentals/architecture/rings/_index.md b/docs/sources/fundamentals/architecture/rings/_index.md -index 41da771fb655f..93002905c6f66 100644 ---- a/docs/sources/fundamentals/architecture/rings/_index.md -+++ b/docs/sources/fundamentals/architecture/rings/_index.md -@@ -32,7 +32,7 @@ These components need to be connected into a hash ring: - These components can optionally be connected into a hash ring: - - index gateway - --In an architecture that has three distributors and three ingestors defined, -+In an architecture that has three distributors and three ingesters defined, - the hash rings for these components connect the instances of same-type components. - - ![distributor and ingester rings](./ring-overview.png) -diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md -index bfb3518e1f483..44fecebf38ee1 100644 ---- a/docs/sources/installation/helm/reference.md -+++ b/docs/sources/installation/helm/reference.md -@@ -213,7 +213,7 @@ null - - backend.serviceLabels - object -- Labels for ingestor service -+ Labels for ingester service -
- {}
- 
-@@ -231,7 +231,7 @@ null - - backend.terminationGracePeriodSeconds - int -- Grace period to allow the backend to shutdown before it is killed. Especially for the ingestor, this must be increased. It must be long enough so backends can be gracefully shutdown flushing/transferring all data and to successfully leave the member ring on shutdown. -+ Grace period to allow the backend to shutdown before it is killed. Especially for the ingester, this must be increased. It must be long enough so backends can be gracefully shutdown flushing/transferring all data and to successfully leave the member ring on shutdown. -
- 300
- 
-@@ -3683,7 +3683,7 @@ null - - write.serviceLabels - object -- Labels for ingestor service -+ Labels for ingester service -
- {}
- 
-@@ -3701,7 +3701,7 @@ null - - write.terminationGracePeriodSeconds - int -- Grace period to allow the write to shutdown before it is killed. Especially for the ingestor, this must be increased. It must be long enough so writes can be gracefully shutdown flushing/transferring all data and to successfully leave the member ring on shutdown. -+ Grace period to allow the write to shutdown before it is killed. Especially for the ingester, this must be increased. It must be long enough so writes can be gracefully shutdown flushing/transferring all data and to successfully leave the member ring on shutdown. -
- 300
- 
-diff --git a/docs/sources/installation/istio.md b/docs/sources/installation/istio.md -index 7cd409aa9d528..2e7ac69d62d44 100644 ---- a/docs/sources/installation/istio.md -+++ b/docs/sources/installation/istio.md -@@ -1,11 +1,11 @@ - --- --title: Installation instructions for Istio -+title: Installing on Istio - description: Installation instructions for Istio - weight: 50 - --- --# Installation instructions for Istio -+# Installing on Istio - --The ingestor, querier, etc. might start, but if those changes are not made, you will see logs like -+When installing Loki on Istio service mesh you must complete some additional steps. Without these steps, the ingester, querier, etc. might start, but you will see logs like the following: - - ``` - loki level=debug ts=2021-11-24T11:33:37.352544925Z caller=broadcast.go:48 msg=""Invalidating forwarded broadcast"" key=collectors/distributor version=123 oldVersion=122 content=[loki-distributor-59c4896444-t9t6g[] oldContent=[loki-distributor-59c4896444-t9t6g[] -@@ -19,12 +19,13 @@ If you try to add `loki` to `Grafana` data sources, you will see logs like (`emp - loki level=warn ts=2021-11-24T08:02:42.08262122Z caller=logging.go:72 traceID=3fc821042d8ada1a orgID=fake msg=""GET /loki/api/v1/labels?end=1637740962079859431&start=1637740361925000000 (500) 97.4µs Response: \""empty ring\\n\"" ws: false; X-Scope-Orgid: fake; uber-trace-id: 3fc821042d8ada1a:1feed8872deea75c:1180f95a8235bb6c:0; "" - ``` - --When istio-injection is enabled on the namespace running Loki, there are few changes needed. One of the main changes is around the `Service` `appProtocols`. --Given that istio will not allow a pod to resolve another pod using an ip address, the pods part of the `memberlist` will fail. -+When you enable istio-injection on the namespace where Loki is running, you need to also modify the configuration for the Loki services. Given that Istio will not allow a pod to resolve another mod using an IP address, you must also modify the `memberlist` service. - --## Changes Required -+## Required changes - --### Query Frontend Service -+### Query frontend service -+ -+Make the following modifications to the file for Loki's Query Frontend service. - - 1. Change the name of `grpc` port to `grpclb`. This is used by the grpc load balancing strategy which relies on SRV records. Otherwise the `querier` will not be able to reach the `query-frontend`. See https://github.com/grafana/loki/blob/0116aa61c86fa983ddcbbd5e30a2141d2e89081a/production/ksonnet/loki/common.libsonnet#L19 - and -@@ -62,6 +63,8 @@ spec: - - ### Querier service - -+Make the following modifications to the file for Loki's Querier service. -+ - Set the `appProtocol` of the `grpc` service to `tcp` - - ``` -@@ -94,7 +97,9 @@ spec: - - ``` - --### Ingester Service and Ingester Headless Service -+### Ingester service and Ingester headless service -+ -+Make the following modifications to the file for Loki's Query Ingester and Ingester Headless service. - - Set the `appProtocol` of the `grpc` port to `tcp` - -@@ -126,7 +131,9 @@ spec: - type: ClusterIP - ``` - --### Distributor Service -+### Distributor service -+ -+Make the following modifications to the file for Loki's Distributor service. - - Set the `appProtocol` of the `grpc` port to `tcp` - -@@ -158,7 +165,9 @@ spec: - - ``` - --## Memberlist Service -+### Memberlist service -+ -+Make the following modifications to the file for the Memberlist service. 
- - Set the `appProtocol` of the `http` port to `tcp` - -diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml -index f73c9d099ca11..30003f81ca186 100644 ---- a/production/helm/loki/values.yaml -+++ b/production/helm/loki/values.yaml -@@ -662,7 +662,7 @@ write: - podLabels: {} - # -- Additional selector labels for each `write` pod - selectorLabels: {} -- # -- Labels for ingestor service -+ # -- Labels for ingester service - serviceLabels: {} - # -- Comma-separated list of Loki modules to load for the write - targetModule: ""write"" -@@ -682,7 +682,7 @@ write: - extraVolumes: [] - # -- Resource requests and limits for the write - resources: {} -- # -- Grace period to allow the write to shutdown before it is killed. Especially for the ingestor, -+ # -- Grace period to allow the write to shutdown before it is killed. Especially for the ingester, - # this must be increased. It must be long enough so writes can be gracefully shutdown flushing/transferring - # all data and to successfully leave the member ring on shutdown. - terminationGracePeriodSeconds: 300 -@@ -867,7 +867,7 @@ backend: - podAnnotations: {} - # -- Additional selector labels for each `backend` pod - selectorLabels: {} -- # -- Labels for ingestor service -+ # -- Labels for ingester service - serviceLabels: {} - # -- Comma-separated list of Loki modules to load for the read - targetModule: ""backend"" -@@ -885,7 +885,7 @@ backend: - extraVolumes: [] - # -- Resource requests and limits for the backend - resources: {} -- # -- Grace period to allow the backend to shutdown before it is killed. Especially for the ingestor, -+ # -- Grace period to allow the backend to shutdown before it is killed. Especially for the ingester, - # this must be increased. It must be long enough so backends can be gracefully shutdown flushing/transferring - # all data and to successfully leave the member ring on shutdown. - terminationGracePeriodSeconds: 300",unknown,[Docs] Adding context to Istio installation. (#8248) -b90c460769d8c0b1d46434f5ba68de7545a9f9b6,2022-05-20 00:52:31,Karen Miller,"Rename fudge_duplicate_timestamp to be increment_duplicate_timestamp (#6120) - -* Rename fudge_duplicate_timestamp to be increment_duplicate_timestamp - -* run `gofmt -d -w pkg/validation/limits.go` - -Co-authored-by: Christian Simon ",False,"diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md -index 24d37c8631ca7..de57fec746ce0 100644 ---- a/docs/sources/configuration/_index.md -+++ b/docs/sources/configuration/_index.md -@@ -2163,15 +2163,16 @@ The `limits_config` block configures global and per-tenant limits in Loki. - # CLI flag: -distributor.max-line-size-truncate - [max_line_size_truncate: | default = false ] - --# Fudge the log line timestamp during ingestion when it's the same as the previous entry for the same stream --# When enabled, if a log line in a push request has the same timestamp as the previous line --# for the same stream, one nanosecond is added to the log line. This will preserve the received --# order of log lines with the exact same timestamp when they are queried by slightly altering --# their stored timestamp. NOTE: this is imperfect because Loki accepts out of order writes --# and another push request for the same stream could contain duplicate timestamps to existing --# entries and they will not be fudged. 
--# CLI flag: -validation.fudge-duplicate-timestamps --[fudge_duplicate_timestamp: | default = false ] -+# Alter the log line timestamp during ingestion when the timestamp is the same as the -+# previous entry for the same stream. When enabled, if a log line in a push request has -+# the same timestamp as the previous line for the same stream, one nanosecond is added -+# to the log line. This will preserve the received order of log lines with the exact -+# same timestamp when they are queried, by slightly altering their stored timestamp. -+# NOTE: This is imperfect, because Loki accepts out of order writes, and another push -+# request for the same stream could contain duplicate timestamps to existing -+# entries and they will not be incremented. -+# CLI flag: -validation.increment-duplicate-timestamps -+[increment_duplicate_timestamp: | default = false ] - - # Maximum number of log entries that will be returned for a query. - # CLI flag: -validation.max-entries-limit -diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go -index 577101438b9fd..0ae99089cc702 100644 ---- a/pkg/distributor/distributor.go -+++ b/pkg/distributor/distributor.go -@@ -273,13 +273,13 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log - - stream.Entries[n] = entry - -- // If configured for this tenant, fudge duplicate timestamps. Note, this is imperfect -+ // If configured for this tenant, increment duplicate timestamps. Note, this is imperfect - // since Loki will accept out of order writes it doesn't account for separate - // pushes with overlapping time ranges having entries with duplicate timestamps -- if validationContext.fudgeDuplicateTimestamps && n != 0 && stream.Entries[n-1].Timestamp.Equal(entry.Timestamp) { -+ if validationContext.incrementDuplicateTimestamps && n != 0 && stream.Entries[n-1].Timestamp.Equal(entry.Timestamp) { - // Traditional logic for Loki is that 2 lines with the same timestamp and - // exact same content will be de-duplicated, (i.e. 
only one will be stored, others dropped) -- // To maintain this behavior, only fudge the timestamp if the log content is different -+ // To maintain this behavior, only increment the timestamp if the log content is different - if stream.Entries[n-1].Line != entry.Line { - stream.Entries[n].Timestamp = entry.Timestamp.Add(1 * time.Nanosecond) - } -diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go -index 2da8986a3f145..a3c2623f63620 100644 ---- a/pkg/distributor/distributor_test.go -+++ b/pkg/distributor/distributor_test.go -@@ -100,23 +100,23 @@ func TestDistributor(t *testing.T) { - } - } - --func Test_FudgeTimestamp(t *testing.T) { -- fudgingDisabled := &validation.Limits{} -- flagext.DefaultValues(fudgingDisabled) -- fudgingDisabled.RejectOldSamples = false -+func Test_IncrementTimestamp(t *testing.T) { -+ incrementingDisabled := &validation.Limits{} -+ flagext.DefaultValues(incrementingDisabled) -+ incrementingDisabled.RejectOldSamples = false - -- fudgingEnabled := &validation.Limits{} -- flagext.DefaultValues(fudgingEnabled) -- fudgingEnabled.RejectOldSamples = false -- fudgingEnabled.FudgeDuplicateTimestamp = true -+ incrementingEnabled := &validation.Limits{} -+ flagext.DefaultValues(incrementingEnabled) -+ incrementingEnabled.RejectOldSamples = false -+ incrementingEnabled.IncrementDuplicateTimestamp = true - - tests := map[string]struct { - limits *validation.Limits - push *logproto.PushRequest - expectedPush *logproto.PushRequest - }{ -- ""fudging disabled, no dupes"": { -- limits: fudgingDisabled, -+ ""incrementing disabled, no dupes"": { -+ limits: incrementingDisabled, - push: &logproto.PushRequest{ - Streams: []logproto.Stream{ - { -@@ -140,8 +140,8 @@ func Test_FudgeTimestamp(t *testing.T) { - }, - }, - }, -- ""fudging disabled, with dupe timestamp different entry"": { -- limits: fudgingDisabled, -+ ""incrementing disabled, with dupe timestamp different entry"": { -+ limits: incrementingDisabled, - push: &logproto.PushRequest{ - Streams: []logproto.Stream{ - { -@@ -165,8 +165,8 @@ func Test_FudgeTimestamp(t *testing.T) { - }, - }, - }, -- ""fudging disabled, with dupe timestamp same entry"": { -- limits: fudgingDisabled, -+ ""incrementing disabled, with dupe timestamp same entry"": { -+ limits: incrementingDisabled, - push: &logproto.PushRequest{ - Streams: []logproto.Stream{ - { -@@ -190,8 +190,8 @@ func Test_FudgeTimestamp(t *testing.T) { - }, - }, - }, -- ""fudging enabled, no dupes"": { -- limits: fudgingEnabled, -+ ""incrementing enabled, no dupes"": { -+ limits: incrementingEnabled, - push: &logproto.PushRequest{ - Streams: []logproto.Stream{ - { -@@ -215,8 +215,8 @@ func Test_FudgeTimestamp(t *testing.T) { - }, - }, - }, -- ""fudging enabled, with dupe timestamp different entry"": { -- limits: fudgingEnabled, -+ ""incrementing enabled, with dupe timestamp different entry"": { -+ limits: incrementingEnabled, - push: &logproto.PushRequest{ - Streams: []logproto.Stream{ - { -@@ -240,8 +240,8 @@ func Test_FudgeTimestamp(t *testing.T) { - }, - }, - }, -- ""fudging enabled, with dupe timestamp same entry"": { -- limits: fudgingEnabled, -+ ""incrementing enabled, with dupe timestamp same entry"": { -+ limits: incrementingEnabled, - push: &logproto.PushRequest{ - Streams: []logproto.Stream{ - { -@@ -265,8 +265,8 @@ func Test_FudgeTimestamp(t *testing.T) { - }, - }, - }, -- ""fudging enabled, multiple subsequent fudges"": { -- limits: fudgingEnabled, -+ ""incrementing enabled, multiple subsequent increments"": { -+ limits: 
incrementingEnabled, - push: &logproto.PushRequest{ - Streams: []logproto.Stream{ - { -diff --git a/pkg/distributor/limits.go b/pkg/distributor/limits.go -index cade28eb093da..9cff9c140140f 100644 ---- a/pkg/distributor/limits.go -+++ b/pkg/distributor/limits.go -@@ -15,5 +15,5 @@ type Limits interface { - RejectOldSamples(userID string) bool - RejectOldSamplesMaxAge(userID string) time.Duration - -- FudgeDuplicateTimestamps(userID string) bool -+ IncrementDuplicateTimestamps(userID string) bool - } -diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go -index 2aca4f42e029c..74f17096c5f79 100644 ---- a/pkg/distributor/validator.go -+++ b/pkg/distributor/validator.go -@@ -40,23 +40,23 @@ type validationContext struct { - maxLabelNameLength int - maxLabelValueLength int - -- fudgeDuplicateTimestamps bool -+ incrementDuplicateTimestamps bool - - userID string - } - - func (v Validator) getValidationContextForTime(now time.Time, userID string) validationContext { - return validationContext{ -- userID: userID, -- rejectOldSample: v.RejectOldSamples(userID), -- rejectOldSampleMaxAge: now.Add(-v.RejectOldSamplesMaxAge(userID)).UnixNano(), -- creationGracePeriod: now.Add(v.CreationGracePeriod(userID)).UnixNano(), -- maxLineSize: v.MaxLineSize(userID), -- maxLineSizeTruncate: v.MaxLineSizeTruncate(userID), -- maxLabelNamesPerSeries: v.MaxLabelNamesPerSeries(userID), -- maxLabelNameLength: v.MaxLabelNameLength(userID), -- maxLabelValueLength: v.MaxLabelValueLength(userID), -- fudgeDuplicateTimestamps: v.FudgeDuplicateTimestamps(userID), -+ userID: userID, -+ rejectOldSample: v.RejectOldSamples(userID), -+ rejectOldSampleMaxAge: now.Add(-v.RejectOldSamplesMaxAge(userID)).UnixNano(), -+ creationGracePeriod: now.Add(v.CreationGracePeriod(userID)).UnixNano(), -+ maxLineSize: v.MaxLineSize(userID), -+ maxLineSizeTruncate: v.MaxLineSizeTruncate(userID), -+ maxLabelNamesPerSeries: v.MaxLabelNamesPerSeries(userID), -+ maxLabelNameLength: v.MaxLabelNameLength(userID), -+ maxLabelValueLength: v.MaxLabelValueLength(userID), -+ incrementDuplicateTimestamps: v.IncrementDuplicateTimestamps(userID), - } - } - -diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go -index b7b13398d0095..2cc1ad8e50e06 100644 ---- a/pkg/validation/limits.go -+++ b/pkg/validation/limits.go -@@ -46,19 +46,19 @@ const ( - // to support user-friendly duration format (e.g: ""1h30m45s"") in JSON value. - type Limits struct { - // Distributor enforced limits. 
-- IngestionRateStrategy string `yaml:""ingestion_rate_strategy"" json:""ingestion_rate_strategy""` -- IngestionRateMB float64 `yaml:""ingestion_rate_mb"" json:""ingestion_rate_mb""` -- IngestionBurstSizeMB float64 `yaml:""ingestion_burst_size_mb"" json:""ingestion_burst_size_mb""` -- MaxLabelNameLength int `yaml:""max_label_name_length"" json:""max_label_name_length""` -- MaxLabelValueLength int `yaml:""max_label_value_length"" json:""max_label_value_length""` -- MaxLabelNamesPerSeries int `yaml:""max_label_names_per_series"" json:""max_label_names_per_series""` -- RejectOldSamples bool `yaml:""reject_old_samples"" json:""reject_old_samples""` -- RejectOldSamplesMaxAge model.Duration `yaml:""reject_old_samples_max_age"" json:""reject_old_samples_max_age""` -- CreationGracePeriod model.Duration `yaml:""creation_grace_period"" json:""creation_grace_period""` -- EnforceMetricName bool `yaml:""enforce_metric_name"" json:""enforce_metric_name""` -- MaxLineSize flagext.ByteSize `yaml:""max_line_size"" json:""max_line_size""` -- MaxLineSizeTruncate bool `yaml:""max_line_size_truncate"" json:""max_line_size_truncate""` -- FudgeDuplicateTimestamp bool `yaml:""fudge_duplicate_timestamp"" json:""fudge_duplicate_timestamp""` -+ IngestionRateStrategy string `yaml:""ingestion_rate_strategy"" json:""ingestion_rate_strategy""` -+ IngestionRateMB float64 `yaml:""ingestion_rate_mb"" json:""ingestion_rate_mb""` -+ IngestionBurstSizeMB float64 `yaml:""ingestion_burst_size_mb"" json:""ingestion_burst_size_mb""` -+ MaxLabelNameLength int `yaml:""max_label_name_length"" json:""max_label_name_length""` -+ MaxLabelValueLength int `yaml:""max_label_value_length"" json:""max_label_value_length""` -+ MaxLabelNamesPerSeries int `yaml:""max_label_names_per_series"" json:""max_label_names_per_series""` -+ RejectOldSamples bool `yaml:""reject_old_samples"" json:""reject_old_samples""` -+ RejectOldSamplesMaxAge model.Duration `yaml:""reject_old_samples_max_age"" json:""reject_old_samples_max_age""` -+ CreationGracePeriod model.Duration `yaml:""creation_grace_period"" json:""creation_grace_period""` -+ EnforceMetricName bool `yaml:""enforce_metric_name"" json:""enforce_metric_name""` -+ MaxLineSize flagext.ByteSize `yaml:""max_line_size"" json:""max_line_size""` -+ MaxLineSizeTruncate bool `yaml:""max_line_size_truncate"" json:""max_line_size_truncate""` -+ IncrementDuplicateTimestamp bool `yaml:""increment_duplicate_timestamp"" json:""increment_duplicate_timestamp""` - - // Ingester enforced limits. - MaxLocalStreamsPerUser int `yaml:""max_streams_per_user"" json:""max_streams_per_user""` -@@ -136,7 +136,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&l.MaxLabelValueLength, ""validation.max-length-label-value"", 2048, ""Maximum length accepted for label value. 
This setting also applies to the metric name"") - f.IntVar(&l.MaxLabelNamesPerSeries, ""validation.max-label-names-per-series"", 30, ""Maximum number of label names per series."") - f.BoolVar(&l.RejectOldSamples, ""validation.reject-old-samples"", true, ""Reject old samples."") -- f.BoolVar(&l.FudgeDuplicateTimestamp, ""validation.fudge-duplicate-timestamps"", false, ""Fudge the timestamp of a log line by one nanosecond in the future from a previous entry for the same stream with the same timestamp, guarantees sort order at query time."") -+ f.BoolVar(&l.IncrementDuplicateTimestamp, ""validation.increment-duplicate-timestamps"", false, ""Increment the timestamp of a log line by one nanosecond in the future from a previous entry for the same stream with the same timestamp; guarantees sort order at query time."") - - _ = l.RejectOldSamplesMaxAge.Set(""7d"") - f.Var(&l.RejectOldSamplesMaxAge, ""validation.reject-old-samples.max-age"", ""Maximum accepted sample age before rejecting."") -@@ -539,8 +539,8 @@ func (o *Overrides) PerStreamRateLimit(userID string) RateLimit { - } - } - --func (o *Overrides) FudgeDuplicateTimestamps(userID string) bool { -- return o.getOverridesForUser(userID).FudgeDuplicateTimestamp -+func (o *Overrides) IncrementDuplicateTimestamps(userID string) bool { -+ return o.getOverridesForUser(userID).IncrementDuplicateTimestamp - } - - func (o *Overrides) getOverridesForUser(userID string) *Limits {",unknown,"Rename fudge_duplicate_timestamp to be increment_duplicate_timestamp (#6120) - -* Rename fudge_duplicate_timestamp to be increment_duplicate_timestamp - -* run `gofmt -d -w pkg/validation/limits.go` - -Co-authored-by: Christian Simon " -7635a5cffa80cf5ff627b8de2ed00fa96c058629,2024-11-08 14:41:16,Periklis Tsirakidis,feat(operator): Add support for managed GCP WorkloadIdentity (#14752),False,"diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml -index 88e32263e6ea9..2eae710050f94 100644 ---- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml -+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml -@@ -159,7 +159,7 @@ metadata: - features.operators.openshift.io/tls-profiles: ""true"" - features.operators.openshift.io/token-auth-aws: ""true"" - features.operators.openshift.io/token-auth-azure: ""true"" -- features.operators.openshift.io/token-auth-gcp: ""false"" -+ features.operators.openshift.io/token-auth-gcp: ""true"" - operators.operatorframework.io/builder: operator-sdk-unknown - operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 - repository: https://github.com/grafana/loki/tree/main/operator -diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -index bccd4ce369768..31c41588a0fbe 100644 ---- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml -@@ -166,7 +166,7 @@ metadata: - features.operators.openshift.io/tls-profiles: ""true"" - features.operators.openshift.io/token-auth-aws: ""true"" - features.operators.openshift.io/token-auth-azure: ""true"" -- features.operators.openshift.io/token-auth-gcp: ""false"" -+ features.operators.openshift.io/token-auth-gcp: ""true"" - olm.skipRange: '>=5.9.0-0 <6.1.0' - 
operatorframework.io/cluster-monitoring: ""true"" - operatorframework.io/suggested-namespace: openshift-operators-redhat -diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml -index 545fdedc903cd..4da83e32c0cd8 100644 ---- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml -+++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml -@@ -16,7 +16,7 @@ metadata: - features.operators.openshift.io/tls-profiles: ""true"" - features.operators.openshift.io/token-auth-aws: ""true"" - features.operators.openshift.io/token-auth-azure: ""true"" -- features.operators.openshift.io/token-auth-gcp: ""false"" -+ features.operators.openshift.io/token-auth-gcp: ""true"" - repository: https://github.com/grafana/loki/tree/main/operator - support: Grafana Loki SIG Operator - labels: -diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml -index 4e78fabef9e9a..f7a44332d7705 100644 ---- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml -+++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml -@@ -22,7 +22,7 @@ metadata: - features.operators.openshift.io/tls-profiles: ""true"" - features.operators.openshift.io/token-auth-aws: ""true"" - features.operators.openshift.io/token-auth-azure: ""true"" -- features.operators.openshift.io/token-auth-gcp: ""false"" -+ features.operators.openshift.io/token-auth-gcp: ""true"" - olm.skipRange: '>=5.9.0-0 <6.1.0' - operatorframework.io/cluster-monitoring: ""true"" - operatorframework.io/suggested-namespace: openshift-operators-redhat -diff --git a/operator/internal/config/managed_auth.go b/operator/internal/config/managed_auth.go -index 6e3dc524716b1..4d3a07cdb0590 100644 ---- a/operator/internal/config/managed_auth.go -+++ b/operator/internal/config/managed_auth.go -@@ -1,6 +1,9 @@ - package config - --import ""os"" -+import ( -+ ""fmt"" -+ ""os"" -+) - - type AWSEnvironment struct { - RoleARN string -@@ -13,9 +16,15 @@ type AzureEnvironment struct { - Region string - } - -+type GCPEnvironment struct { -+ Audience string -+ ServiceAccountEmail string -+} -+ - type TokenCCOAuthConfig struct { - AWS *AWSEnvironment - Azure *AzureEnvironment -+ GCP *GCPEnvironment - } - - func discoverTokenCCOAuthConfig() *TokenCCOAuthConfig { -@@ -28,6 +37,12 @@ func discoverTokenCCOAuthConfig() *TokenCCOAuthConfig { - subscriptionID := os.Getenv(""SUBSCRIPTIONID"") - region := os.Getenv(""REGION"") - -+ // GCP -+ projectNumber := os.Getenv(""PROJECT_NUMBER"") -+ poolID := os.Getenv(""POOL_ID"") -+ providerID := os.Getenv(""PROVIDER_ID"") -+ serviceAccountEmail := os.Getenv(""SERVICE_ACCOUNT_EMAIL"") -+ - switch { - case roleARN != """": - return &TokenCCOAuthConfig{ -@@ -44,6 +59,20 @@ func discoverTokenCCOAuthConfig() *TokenCCOAuthConfig { - Region: region, - }, - } -+ case projectNumber != """" && poolID != """" && providerID != """" && serviceAccountEmail != """": -+ audience := fmt.Sprintf( -+ ""//iam.googleapis.com/projects/%s/locations/global/workloadIdentityPools/%s/providers/%s"", -+ projectNumber, -+ poolID, -+ providerID, -+ ) -+ -+ return &TokenCCOAuthConfig{ -+ GCP: &GCPEnvironment{ -+ Audience: audience, -+ ServiceAccountEmail: serviceAccountEmail, -+ }, -+ } - } 
- - return nil -diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go -index 36065afb4c8ab..999c972b734c3 100644 ---- a/operator/internal/handlers/internal/storage/secrets.go -+++ b/operator/internal/handlers/internal/storage/secrets.go -@@ -33,8 +33,7 @@ var ( - errSecretUnknownSSEType = errors.New(""unsupported SSE type (supported: SSE-KMS, SSE-S3)"") - errSecretHashError = errors.New(""error calculating hash for secret"") - -- errSecretUnknownCredentialMode = errors.New(""unknown credential mode"") -- errSecretUnsupportedCredentialMode = errors.New(""combination of storage type and credential mode not supported"") -+ errSecretUnknownCredentialMode = errors.New(""unknown credential mode"") - - errAzureManagedIdentityNoOverride = errors.New(""when in managed mode, storage secret can not contain credentials"") - errAzureInvalidEnvironment = errors.New(""azure environment invalid (valid values: AzureGlobal, AzureChinaCloud, AzureGermanCloud, AzureUSGovernment)"") -@@ -47,6 +46,7 @@ var ( - - errGCPParseCredentialsFile = errors.New(""gcp storage secret cannot be parsed from JSON content"") - errGCPWrongCredentialSourceFile = errors.New(""credential source in secret needs to point to token file"") -+ errGCPInvalidCredentialsFile = errors.New(""gcp credentials file contains invalid fields"") - - azureValidEnvironments = map[string]bool{ - ""AzureGlobal"": true, -@@ -355,6 +355,15 @@ func extractGCSConfigSecret(s *corev1.Secret, credentialMode lokiv1.CredentialMo - } - - switch credentialMode { -+ case lokiv1.CredentialModeTokenCCO: -+ if _, ok := s.Data[storage.KeyGCPServiceAccountKeyFilename]; ok { -+ return nil, fmt.Errorf(""%w: %s"", errGCPInvalidCredentialsFile, ""key.json must not be set for CredentialModeTokenCCO"") -+ } -+ -+ return &storage.GCSStorageConfig{ -+ Bucket: string(bucket), -+ WorkloadIdentity: true, -+ }, nil - case lokiv1.CredentialModeStatic: - return &storage.GCSStorageConfig{ - Bucket: string(bucket), -@@ -380,12 +389,9 @@ func extractGCSConfigSecret(s *corev1.Secret, credentialMode lokiv1.CredentialMo - WorkloadIdentity: true, - Audience: audience, - }, nil -- case lokiv1.CredentialModeTokenCCO: -- return nil, fmt.Errorf(""%w: type: %s credentialMode: %s"", errSecretUnsupportedCredentialMode, lokiv1.ObjectStorageSecretGCS, credentialMode) - default: -+ return nil, fmt.Errorf(""%w: %s"", errSecretUnknownCredentialMode, credentialMode) - } -- -- return nil, fmt.Errorf(""%w: %s"", errSecretUnknownCredentialMode, credentialMode) - } - - func extractS3ConfigSecret(s *corev1.Secret, credentialMode lokiv1.CredentialMode) (*storage.S3StorageConfig, error) { -diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go -index 0688b099f3a86..466c9f3487d8d 100644 ---- a/operator/internal/handlers/internal/storage/secrets_test.go -+++ b/operator/internal/handlers/internal/storage/secrets_test.go -@@ -277,6 +277,8 @@ func TestGCSExtract(t *testing.T) { - type test struct { - name string - secret *corev1.Secret -+ tokenAuth *corev1.Secret -+ featureGates configv1.FeatureGates - wantError string - wantCredentialMode lokiv1.CredentialMode - } -@@ -343,6 +345,45 @@ func TestGCSExtract(t *testing.T) { - }, - wantCredentialMode: lokiv1.CredentialModeToken, - }, -+ { -+ name: ""invalid for token CCO"", -+ featureGates: configv1.FeatureGates{ -+ OpenShift: configv1.OpenShiftFeatureGates{ -+ Enabled: true, -+ TokenCCOAuthEnv: true, -+ }, -+ }, -+ 
secret: &corev1.Secret{ -+ ObjectMeta: metav1.ObjectMeta{Name: ""test""}, -+ Data: map[string][]byte{ -+ ""bucketname"": []byte(""here""), -+ ""key.json"": []byte(""{\""type\"": \""external_account\"", \""audience\"": \""\"", \""service_account_id\"": \""\""}""), -+ }, -+ }, -+ wantError: ""gcp credentials file contains invalid fields: key.json must not be set for CredentialModeTokenCCO"", -+ }, -+ { -+ name: ""valid for token CCO"", -+ featureGates: configv1.FeatureGates{ -+ OpenShift: configv1.OpenShiftFeatureGates{ -+ Enabled: true, -+ TokenCCOAuthEnv: true, -+ }, -+ }, -+ secret: &corev1.Secret{ -+ ObjectMeta: metav1.ObjectMeta{Name: ""test""}, -+ Data: map[string][]byte{ -+ ""bucketname"": []byte(""here""), -+ }, -+ }, -+ tokenAuth: &corev1.Secret{ -+ ObjectMeta: metav1.ObjectMeta{Name: ""token-auth-config""}, -+ Data: map[string][]byte{ -+ ""service_account.json"": []byte(""{\""type\"": \""external_account\"", \""audience\"": \""test\"", \""service_account_id\"": \""\""}""), -+ }, -+ }, -+ wantCredentialMode: lokiv1.CredentialModeTokenCCO, -+ }, - } - for _, tst := range table { - t.Run(tst.name, func(t *testing.T) { -@@ -352,7 +393,7 @@ func TestGCSExtract(t *testing.T) { - Type: lokiv1.ObjectStorageSecretGCS, - } - -- opts, err := extractSecrets(spec, tst.secret, nil, configv1.FeatureGates{}) -+ opts, err := extractSecrets(spec, tst.secret, tst.tokenAuth, tst.featureGates) - if tst.wantError == """" { - require.NoError(t, err) - require.Equal(t, tst.wantCredentialMode, opts.CredentialMode) -diff --git a/operator/internal/manifests/openshift/credentialsrequest.go b/operator/internal/manifests/openshift/credentialsrequest.go -index 3f202998212a9..7ed2476b3eb40 100644 ---- a/operator/internal/manifests/openshift/credentialsrequest.go -+++ b/operator/internal/manifests/openshift/credentialsrequest.go -@@ -98,6 +98,15 @@ func encodeProviderSpec(env *config.TokenCCOAuthConfig) (*runtime.RawExtension, - AzureSubscriptionID: azure.SubscriptionID, - AzureTenantID: azure.TenantID, - } -+ case env.GCP != nil: -+ spec = &cloudcredentialv1.GCPProviderSpec{ -+ PredefinedRoles: []string{ -+ ""roles/iam.workloadIdentityUser"", -+ ""roles/storage.objectAdmin"", -+ }, -+ Audience: env.GCP.Audience, -+ ServiceAccountEmail: env.GCP.ServiceAccountEmail, -+ } - } - - encodedSpec, err := cloudcredentialv1.Codec.EncodeProviderSpec(spec.DeepCopyObject()) -diff --git a/operator/internal/manifests/storage/configure.go b/operator/internal/manifests/storage/configure.go -index ce6fa78273349..a8d0bf69f82b9 100644 ---- a/operator/internal/manifests/storage/configure.go -+++ b/operator/internal/manifests/storage/configure.go -@@ -141,7 +141,9 @@ func ensureObjectStoreCredentials(p *corev1.PodSpec, opts Options) corev1.PodSpe - volumes = append(volumes, saTokenVolume(opts)) - container.VolumeMounts = append(container.VolumeMounts, saTokenVolumeMount) - -- if opts.OpenShift.TokenCCOAuthEnabled() && opts.S3 != nil && opts.S3.STS { -+ isSTS := opts.S3 != nil && opts.S3.STS -+ isWIF := opts.GCS != nil && opts.GCS.WorkloadIdentity -+ if opts.OpenShift.TokenCCOAuthEnabled() && (isSTS || isWIF) { - volumes = append(volumes, tokenCCOAuthConfigVolume(opts)) - container.VolumeMounts = append(container.VolumeMounts, tokenCCOAuthConfigVolumeMount) - } -@@ -223,8 +225,14 @@ func tokenAuthCredentials(opts Options) []corev1.EnvVar { - envVarFromValue(EnvAzureFederatedTokenFile, ServiceAccountTokenFilePath), - } - case lokiv1.ObjectStorageSecretGCS: -- return []corev1.EnvVar{ -- envVarFromValue(EnvGoogleApplicationCredentials, 
path.Join(secretDirectory, KeyGCPServiceAccountKeyFilename)), -+ if opts.OpenShift.TokenCCOAuthEnabled() { -+ return []corev1.EnvVar{ -+ envVarFromValue(EnvGoogleApplicationCredentials, path.Join(tokenAuthConfigDirectory, KeyGCPManagedServiceAccountKeyFilename)), -+ } -+ } else { -+ return []corev1.EnvVar{ -+ envVarFromValue(EnvGoogleApplicationCredentials, path.Join(secretDirectory, KeyGCPServiceAccountKeyFilename)), -+ } - } - default: - return []corev1.EnvVar{} -@@ -326,7 +334,10 @@ func saTokenVolume(opts Options) corev1.Volume { - audience = opts.Azure.Audience - } - case lokiv1.ObjectStorageSecretGCS: -- audience = opts.GCS.Audience -+ audience = gcpDefaultAudience -+ if opts.GCS.Audience != """" { -+ audience = opts.GCS.Audience -+ } - } - return corev1.Volume{ - Name: saTokenVolumeName, -diff --git a/operator/internal/manifests/storage/configure_test.go b/operator/internal/manifests/storage/configure_test.go -index 3080f924c11cf..2c4ef5636d9c0 100644 ---- a/operator/internal/manifests/storage/configure_test.go -+++ b/operator/internal/manifests/storage/configure_test.go -@@ -689,6 +689,103 @@ func TestConfigureDeploymentForStorageType(t *testing.T) { - }, - }, - }, -+ { -+ desc: ""object storage GCS with Workload Identity and OpenShift Managed Credentials"", -+ opts: Options{ -+ SecretName: ""test"", -+ SharedStore: lokiv1.ObjectStorageSecretGCS, -+ CredentialMode: lokiv1.CredentialModeTokenCCO, -+ GCS: &GCSStorageConfig{ -+ WorkloadIdentity: true, -+ }, -+ OpenShift: OpenShiftOptions{ -+ Enabled: true, -+ CloudCredentials: CloudCredentials{ -+ SecretName: ""cloud-credentials"", -+ SHA1: ""deadbeef"", -+ }, -+ }, -+ }, -+ dpl: &appsv1.Deployment{ -+ Spec: appsv1.DeploymentSpec{ -+ Template: corev1.PodTemplateSpec{ -+ Spec: corev1.PodSpec{ -+ Containers: []corev1.Container{ -+ { -+ Name: ""loki-ingester"", -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ want: &appsv1.Deployment{ -+ Spec: appsv1.DeploymentSpec{ -+ Template: corev1.PodTemplateSpec{ -+ Spec: corev1.PodSpec{ -+ Containers: []corev1.Container{ -+ { -+ Name: ""loki-ingester"", -+ VolumeMounts: []corev1.VolumeMount{ -+ { -+ Name: ""test"", -+ ReadOnly: false, -+ MountPath: ""/etc/storage/secrets"", -+ }, -+ { -+ Name: saTokenVolumeName, -+ ReadOnly: false, -+ MountPath: saTokenVolumeMountPath, -+ }, -+ tokenCCOAuthConfigVolumeMount, -+ }, -+ Env: []corev1.EnvVar{ -+ { -+ Name: EnvGoogleApplicationCredentials, -+ Value: ""/etc/storage/token-auth/service_account.json"", -+ }, -+ }, -+ }, -+ }, -+ Volumes: []corev1.Volume{ -+ { -+ Name: ""test"", -+ VolumeSource: corev1.VolumeSource{ -+ Secret: &corev1.SecretVolumeSource{ -+ SecretName: ""test"", -+ }, -+ }, -+ }, -+ { -+ Name: saTokenVolumeName, -+ VolumeSource: corev1.VolumeSource{ -+ Projected: &corev1.ProjectedVolumeSource{ -+ Sources: []corev1.VolumeProjection{ -+ { -+ ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ -+ Audience: gcpDefaultAudience, -+ ExpirationSeconds: ptr.To[int64](3600), -+ Path: corev1.ServiceAccountTokenKey, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ { -+ Name: tokenAuthConfigVolumeName, -+ VolumeSource: corev1.VolumeSource{ -+ Secret: &corev1.SecretVolumeSource{ -+ SecretName: ""cloud-credentials"", -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, -+ }, - { - desc: ""object storage S3"", - opts: Options{ -diff --git a/operator/internal/manifests/storage/var.go b/operator/internal/manifests/storage/var.go -index ccb69ff27289d..108d811412c3d 100644 ---- a/operator/internal/manifests/storage/var.go -+++ b/operator/internal/manifests/storage/var.go -@@ 
-97,6 +97,8 @@ const ( - KeyGCPStorageBucketName = ""bucketname"" - // KeyGCPServiceAccountKeyFilename is the service account key filename containing the Google authentication credentials. - KeyGCPServiceAccountKeyFilename = ""key.json"" -+ // KeyGCPManagedServiceAccountKeyFilename is the service account key filename for the managed Google service account. -+ KeyGCPManagedServiceAccountKeyFilename = ""service_account.json"" - - // KeySwiftAuthURL is the secret data key for the OpenStack Swift authentication URL. - KeySwiftAuthURL = ""auth_url"" -@@ -140,9 +142,9 @@ const ( - tokenAuthConfigVolumeName = ""token-auth-config"" - tokenAuthConfigDirectory = ""/etc/storage/token-auth"" - -- awsDefaultAudience = ""sts.amazonaws.com"" -- -+ awsDefaultAudience = ""sts.amazonaws.com"" - azureDefaultAudience = ""api://AzureADTokenExchange"" -+ gcpDefaultAudience = ""openshift"" - - azureManagedCredentialKeyClientID = ""azure_client_id"" - azureManagedCredentialKeyTenantID = ""azure_tenant_id""",feat,Add support for managed GCP WorkloadIdentity (#14752) -139c4e5f4d56aad814d82746e5c62d12188f23ad,2025-01-24 20:29:00,renovate[bot],"fix(deps): update module github.com/opentracing-contrib/go-grpc to v0.1.1 (main) (#15941) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod -index 992e476ffc89e..0adb7a0c57e93 100644 ---- a/go.mod -+++ b/go.mod -@@ -75,7 +75,7 @@ require ( - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f - github.com/oklog/run v1.1.0 - github.com/oklog/ulid v1.3.1 // indirect -- github.com/opentracing-contrib/go-grpc v0.1.0 -+ github.com/opentracing-contrib/go-grpc v0.1.1 - github.com/opentracing-contrib/go-stdlib v1.1.0 - github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b - github.com/oschwald/geoip2-golang v1.11.0 -diff --git a/go.sum b/go.sum -index c871d7268e7fe..dfe58f1a51351 100644 ---- a/go.sum -+++ b/go.sum -@@ -928,8 +928,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 - github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= - github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= - github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= --github.com/opentracing-contrib/go-grpc v0.1.0 h1:9JHDtQXv6UL0tFF8KJB/4ApJgeOcaHp1h07d0PJjESc= --github.com/opentracing-contrib/go-grpc v0.1.0/go.mod h1:i3/jx/TvJZ/HKidtT4XGIi/NosUEpzS9xjVJctbKZzI= -+github.com/opentracing-contrib/go-grpc v0.1.1 h1:Ws7IN1zyiL1DFqKQPhRXuKe5pLYzMfdxnC1qtajE2PE= -+github.com/opentracing-contrib/go-grpc v0.1.1/go.mod h1:Nu6sz+4zzgxXu8rvKfnwjBEmHsuhTigxRwV2RhELrS8= - github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= - github.com/opentracing-contrib/go-stdlib v1.1.0 h1:cZBWc4pA4e65tqTJddbflK435S0tDImj6c9BMvkdUH0= - github.com/opentracing-contrib/go-stdlib v1.1.0/go.mod h1:S0p+X9p6dcBkoMTL+Qq2VOvxKs9ys5PpYWXWqlCS0bQ= -diff --git a/vendor/modules.txt b/vendor/modules.txt -index 31cc805677801..89d4b9e3abfea 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -1355,7 +1355,7 @@ github.com/opencontainers/go-digest - ## explicit; go 1.18 - github.com/opencontainers/image-spec/specs-go - github.com/opencontainers/image-spec/specs-go/v1 --# github.com/opentracing-contrib/go-grpc v0.1.0 -+# github.com/opentracing-contrib/go-grpc v0.1.1 - ## explicit; go 1.22.7 - 
github.com/opentracing-contrib/go-grpc - # github.com/opentracing-contrib/go-stdlib v1.1.0",fix,"update module github.com/opentracing-contrib/go-grpc to v0.1.1 (main) (#15941) - -Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>" -a3feacea347a636e733df36c362ac15667008534,2023-08-23 18:58:58,dependabot[bot],build(deps): bump github.com/google/uuid from 1.3.0 to 1.3.1 in /operator (#10305),False,"diff --git a/operator/go.mod b/operator/go.mod -index c51d3a4daaa04..1690f35c7287c 100644 ---- a/operator/go.mod -+++ b/operator/go.mod -@@ -6,7 +6,7 @@ require ( - github.com/ViaQ/logerr/v2 v2.1.0 - github.com/go-logr/logr v1.2.3 - github.com/google/go-cmp v0.5.9 -- github.com/google/uuid v1.3.0 -+ github.com/google/uuid v1.3.1 - github.com/grafana/loki v1.6.2-0.20230403212622-90888a0cc737 - github.com/grafana/loki/operator/apis/loki v0.0.0-00010101000000-000000000000 - github.com/imdario/mergo v0.3.13 -diff --git a/operator/go.sum b/operator/go.sum -index 4c8b9a98ed3e3..de21537800333 100644 ---- a/operator/go.sum -+++ b/operator/go.sum -@@ -403,8 +403,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 - github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= - github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= - github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= --github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= --github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= - github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= - github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= - github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=",build,bump github.com/google/uuid from 1.3.0 to 1.3.1 in /operator (#10305) -f158b5bc3e94ce9c32aaa91ca259c9b074620284,2024-02-16 05:09:29,Owen Diehl,correctly set block iter when no overlapping blocks are found (#11973),False,"diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go -index e0787c1f6f1ec..bed0834a86b74 100644 ---- a/pkg/bloomcompactor/batch.go -+++ b/pkg/bloomcompactor/batch.go -@@ -285,6 +285,7 @@ func (i *blockLoadingIter) Filter(filter func(*bloomshipper.CloseableBlockQuerie - func (i *blockLoadingIter) loadNext() bool { - // check if there are more overlapping groups to load - if !i.overlapping.Next() { -+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() - return false - }",unknown,correctly set block iter when no overlapping blocks are found (#11973) -9be3c0863e28ade5c060fa09c5efc8e397da2f19,2023-10-31 18:31:01,Salva Corts,"Support categorized labels in Tailing (#11079) - -**What this PR does / why we need it**: - -This is a follow-up PR for https://github.com/grafana/loki/pull/10419 -adding support for tailing. - -I tested it on a dev cell and works fine. - - - -**Note**: With these changes, the JSON marshal unmarshal functions for -the tail are no longer used ([example][1]) so I think we can remove -them. Also, the new Tail response is no longer used, so we can also make -it an alias to the _legacy_ one. Let's do it on a follow-up PR to avoid -making this one bigger. 
- -[1]: -https://github.com/grafana/loki/blob/52a3f16039dd5ff655fc3681257d99794f620ec4/pkg/loghttp/entry.go#L210-L238",False,"diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go -index 106fe25bbfe4d..72e7026e810e7 100644 ---- a/pkg/ingester/tailer.go -+++ b/pkg/ingester/tailer.go -@@ -151,7 +151,7 @@ func (t *tailer) processStream(stream logproto.Stream, lbs labels.Labels) []*log - - sp := t.pipeline.ForStream(lbs) - for _, e := range stream.Entries { -- newLine, parsedLbs, ok := sp.ProcessString(e.Timestamp.UnixNano(), e.Line) -+ newLine, parsedLbs, ok := sp.ProcessString(e.Timestamp.UnixNano(), e.Line, logproto.FromLabelAdaptersToLabels(e.StructuredMetadata)...) - if !ok { - continue - } -@@ -163,8 +163,10 @@ func (t *tailer) processStream(stream logproto.Stream, lbs labels.Labels) []*log - streams[parsedLbs.Hash()] = stream - } - stream.Entries = append(stream.Entries, logproto.Entry{ -- Timestamp: e.Timestamp, -- Line: newLine, -+ Timestamp: e.Timestamp, -+ Line: newLine, -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(parsedLbs.StructuredMetadata()), -+ Parsed: logproto.FromLabelsToLabelAdapters(parsedLbs.Parsed()), - }) - } - streamsResult := make([]*logproto.Stream, 0, len(streams)) -diff --git a/pkg/ingester/tailer_test.go b/pkg/ingester/tailer_test.go -index 9b06e4560018a..59293352030df 100644 ---- a/pkg/ingester/tailer_test.go -+++ b/pkg/ingester/tailer_test.go -@@ -93,10 +93,25 @@ func Test_dropstream(t *testing.T) { - } - } - --type fakeTailServer struct{} -+type fakeTailServer struct { -+ responses []logproto.TailResponse -+} -+ -+func (f *fakeTailServer) Send(response *logproto.TailResponse) error { -+ f.responses = append(f.responses, *response) -+ return nil -+ -+} -+ -+func (f *fakeTailServer) Context() context.Context { return context.Background() } - --func (f *fakeTailServer) Send(*logproto.TailResponse) error { return nil } --func (f *fakeTailServer) Context() context.Context { return context.Background() } -+func (f *fakeTailServer) GetResponses() []logproto.TailResponse { -+ return f.responses -+} -+ -+func (f *fakeTailServer) Reset() { -+ f.responses = f.responses[:0] -+} - - func Test_TailerSendRace(t *testing.T) { - tail, err := newTailer(""foo"", `{app=""foo""} |= ""foo""`, &fakeTailServer{}, 10) -@@ -137,3 +152,126 @@ func Test_IsMatching(t *testing.T) { - }) - } - } -+ -+func Test_StructuredMetadata(t *testing.T) { -+ lbs := makeRandomLabels() -+ -+ for _, tc := range []struct { -+ name string -+ query string -+ sentStream logproto.Stream -+ expectedResponses []logproto.TailResponse -+ }{ -+ { -+ // Optimization will make the same stream to be returned regardless of structured metadata. 
-+ name: ""noop pipeline"", -+ query: `{app=""foo""}`, -+ sentStream: logproto.Stream{ -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }, -+ expectedResponses: []logproto.TailResponse{ -+ { -+ Stream: &logproto.Stream{ -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }, -+ DroppedStreams: nil, -+ }, -+ }, -+ }, -+ { -+ name: ""parse pipeline labels"", -+ query: `{app=""foo""} | logfmt`, -+ sentStream: logproto.Stream{ -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }, -+ expectedResponses: []logproto.TailResponse{ -+ { -+ Stream: &logproto.Stream{ -+ Labels: labels.NewBuilder(lbs).Set(""foo"", ""1"").Labels().String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""1"")), -+ }, -+ }, -+ }, -+ DroppedStreams: nil, -+ }, -+ { -+ Stream: &logproto.Stream{ -+ Labels: labels.NewBuilder(lbs).Set(""traceID"", ""123"").Set(""foo"", ""2"").Labels().String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""2"")), -+ }, -+ }, -+ }, -+ DroppedStreams: nil, -+ }, -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ var server fakeTailServer -+ tail, err := newTailer(""foo"", tc.query, &server, 10) -+ require.NoError(t, err) -+ -+ var wg sync.WaitGroup -+ wg.Add(1) -+ go func() { -+ tail.loop() -+ wg.Done() -+ }() -+ -+ tail.send(tc.sentStream, lbs) -+ -+ // Wait for the stream to be received by the server. 
-+ require.Eventually(t, func() bool { -+ return len(server.GetResponses()) > 0 -+ }, 30*time.Second, 1*time.Second, ""stream was not received"") -+ -+ responses := server.GetResponses() -+ require.ElementsMatch(t, tc.expectedResponses, responses) -+ -+ tail.close() -+ wg.Wait() -+ }) -+ } -+} -diff --git a/pkg/iter/categorized_labels_iterator.go b/pkg/iter/categorized_labels_iterator.go -new file mode 100644 -index 0000000000000..1e95cad09a16e ---- /dev/null -+++ b/pkg/iter/categorized_labels_iterator.go -@@ -0,0 +1,71 @@ -+package iter -+ -+import ( -+ ""fmt"" -+ -+ ""github.com/prometheus/prometheus/model/labels"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" -+ ""github.com/grafana/loki/pkg/logql/syntax"" -+) -+ -+type categorizeLabelsIterator struct { -+ EntryIterator -+ currEntry logproto.Entry -+ currStreamLabels string -+ currHash uint64 -+ currErr error -+} -+ -+func NewCategorizeLabelsIterator(wrap EntryIterator) EntryIterator { -+ return &categorizeLabelsIterator{ -+ EntryIterator: wrap, -+ } -+} -+ -+func (c *categorizeLabelsIterator) Next() bool { -+ if !c.EntryIterator.Next() { -+ return false -+ } -+ -+ c.currEntry = c.Entry() -+ if len(c.currEntry.StructuredMetadata) == 0 && len(c.currEntry.Parsed) == 0 { -+ c.currStreamLabels = c.EntryIterator.Labels() -+ c.currHash = c.EntryIterator.StreamHash() -+ return true -+ } -+ -+ // We need to remove the structured metadata labels and parsed labels from the stream labels. -+ streamLabels := c.EntryIterator.Labels() -+ lbls, err := syntax.ParseLabels(streamLabels) -+ if err != nil { -+ c.currErr = fmt.Errorf(""failed to parse series labels to categorize labels: %w"", err) -+ return false -+ } -+ -+ builder := labels.NewBuilder(lbls) -+ for _, label := range c.currEntry.StructuredMetadata { -+ builder.Del(label.Name) -+ } -+ for _, label := range c.currEntry.Parsed { -+ builder.Del(label.Name) -+ } -+ -+ newLabels := builder.Labels() -+ c.currStreamLabels = newLabels.String() -+ c.currHash = newLabels.Hash() -+ -+ return true -+} -+ -+func (c *categorizeLabelsIterator) Error() error { -+ return c.currErr -+} -+ -+func (c *categorizeLabelsIterator) Labels() string { -+ return c.currStreamLabels -+} -+ -+func (c *categorizeLabelsIterator) StreamHash() uint64 { -+ return c.currHash -+} -diff --git a/pkg/iter/categorized_labels_iterator_test.go b/pkg/iter/categorized_labels_iterator_test.go -new file mode 100644 -index 0000000000000..18259edfbf169 ---- /dev/null -+++ b/pkg/iter/categorized_labels_iterator_test.go -@@ -0,0 +1,145 @@ -+package iter -+ -+import ( -+ ""testing"" -+ ""time"" -+ -+ ""github.com/prometheus/prometheus/model/labels"" -+ ""github.com/stretchr/testify/require"" -+ -+ ""github.com/grafana/loki/pkg/logproto"" -+) -+ -+func TestNewCategorizeLabelsIterator(t *testing.T) { -+ for _, tc := range []struct { -+ name string -+ inner EntryIterator -+ expectedStreams []logproto.Stream -+ }{ -+ { -+ name: ""no structured metadata nor parsed labels"", -+ inner: NewSortEntryIterator([]EntryIterator{ -+ NewStreamIterator(logproto.Stream{ -+ Labels: labels.FromStrings(""namespace"", ""default"").String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""foo=2"", -+ }, -+ }, -+ }), -+ }, logproto.FORWARD), -+ expectedStreams: []logproto.Stream{ -+ { -+ Labels: labels.FromStrings(""namespace"", ""default"").String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), 
-+ Line: ""foo=2"", -+ }, -+ }, -+ }, -+ }, -+ }, -+ { -+ name: ""structured metadata and parsed labels"", -+ inner: NewSortEntryIterator([]EntryIterator{ -+ NewStreamIterator(logproto.Stream{ -+ Labels: labels.FromStrings(""namespace"", ""default"").String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ }, -+ }, -+ }), -+ NewStreamIterator(logproto.Stream{ -+ Labels: labels.FromStrings(""namespace"", ""default"", ""traceID"", ""123"").String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }), -+ NewStreamIterator(logproto.Stream{ -+ Labels: labels.FromStrings(""namespace"", ""default"", ""foo"", ""3"").String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 3), -+ Line: ""foo=3"", -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""3"")), -+ }, -+ }, -+ }), -+ NewStreamIterator(logproto.Stream{ -+ Labels: labels.FromStrings(""namespace"", ""default"", ""traceID"", ""123"", ""foo"", ""4"").String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 4), -+ Line: ""foo=4"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""4"")), -+ }, -+ }, -+ }), -+ }, logproto.FORWARD), -+ expectedStreams: []logproto.Stream{ -+ { -+ Labels: labels.FromStrings(""namespace"", ""default"").String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 1), -+ Line: ""foo=1"", -+ }, -+ { -+ Timestamp: time.Unix(0, 2), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ { -+ Timestamp: time.Unix(0, 3), -+ Line: ""foo=3"", -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""3"")), -+ }, -+ { -+ Timestamp: time.Unix(0, 4), -+ Line: ""foo=4"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""4"")), -+ }, -+ }, -+ }, -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ itr := NewCategorizeLabelsIterator(tc.inner) -+ -+ streamsEntries := make(map[string][]logproto.Entry) -+ for itr.Next() { -+ streamsEntries[itr.Labels()] = append(streamsEntries[itr.Labels()], itr.Entry()) -+ require.NoError(t, itr.Error()) -+ } -+ -+ var streams []logproto.Stream -+ for lbls, entries := range streamsEntries { -+ streams = append(streams, logproto.Stream{ -+ Labels: lbls, -+ Entries: entries, -+ }) -+ } -+ -+ require.ElementsMatch(t, tc.expectedStreams, streams) -+ }) -+ } -+} -diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go -index 8bdcfe8501fc0..1edf86da3ed58 100644 ---- a/pkg/logql/engine.go -+++ b/pkg/logql/engine.go -@@ -287,16 +287,18 @@ func (q *query) Eval(ctx context.Context) (promql_parser.Value, error) { - return value, err - - case syntax.LogSelectorExpr: -- iter, err := q.evaluator.NewIterator(ctx, e, q.params) -+ itr, err := q.evaluator.NewIterator(ctx, e, q.params) - if err != nil { - return nil, err - } - - encodingFlags := httpreq.ExtractEncodingFlagsFromCtx(ctx) -- categorizeLabels := encodingFlags.Has(httpreq.FlagCategorizeLabels) -+ if encodingFlags.Has(httpreq.FlagCategorizeLabels) { -+ itr = iter.NewCategorizeLabelsIterator(itr) -+ } - -- defer util.LogErrorWithContext(ctx, ""closing 
iterator"", iter.Close) -- streams, err := readStreams(iter, q.params.Limit(), q.params.Direction(), q.params.Interval(), categorizeLabels) -+ defer util.LogErrorWithContext(ctx, ""closing iterator"", itr.Close) -+ streams, err := readStreams(itr, q.params.Limit(), q.params.Direction(), q.params.Interval(), true) - return streams, err - default: - return nil, fmt.Errorf(""unexpected type (%T): cannot evaluate"", e) -@@ -513,7 +515,7 @@ func readStreams(i iter.EntryIterator, size uint32, dir logproto.Direction, inte - // value here because many unit tests start at time.Unix(0,0) - lastEntry := lastEntryMinTime - for respSize < size && i.Next() { -- entry := i.Entry() -+ streamLabels, entry := i.Labels(), i.Entry() - - forwardShouldOutput := dir == logproto.FORWARD && - (entry.Timestamp.Equal(lastEntry.Add(interval)) || entry.Timestamp.After(lastEntry.Add(interval))) -@@ -524,27 +526,6 @@ func readStreams(i iter.EntryIterator, size uint32, dir logproto.Direction, inte - // If lastEntry.Unix < 0 this is the first pass through the loop and we should output the line. - // Then check to see if the entry is equal to, or past a forward or reverse step - if interval == 0 || lastEntry.Unix() < 0 || forwardShouldOutput || backwardShouldOutput { -- streamLabels := i.Labels() -- -- // If categorizeLabels is true, We need to remove the structured metadata labels and parsed labels from the stream labels. -- // TODO(salvacorts): If this is too slow, provided this is in the hot path, we can consider doing this in the iterator. -- if categorizeLabels && (len(entry.StructuredMetadata) > 0 || len(entry.Parsed) > 0) { -- lbls, err := syntax.ParseLabels(streamLabels) -- if err != nil { -- return nil, fmt.Errorf(""failed to parse series labels to categorize labels: %w"", err) -- } -- -- builder := labels.NewBuilder(lbls) -- for _, label := range entry.StructuredMetadata { -- builder.Del(label.Name) -- } -- for _, label := range entry.Parsed { -- builder.Del(label.Name) -- } -- -- streamLabels = builder.Labels().String() -- } -- - stream, ok := streams[streamLabels] - if !ok { - stream = &logproto.Stream{ -diff --git a/pkg/querier/http.go b/pkg/querier/http.go -index 3bf777659a898..b6ba4750aec40 100644 ---- a/pkg/querier/http.go -+++ b/pkg/querier/http.go -@@ -145,6 +145,9 @@ func (q *QuerierAPI) TailHandler(w http.ResponseWriter, r *http.Request) { - return - } - -+ encodingFlags := httpreq.ExtractEncodingFlags(r) -+ version := loghttp.GetVersion(r.RequestURI) -+ - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - level.Error(logger).Log(""msg"", ""Error in upgrading websocket"", ""err"", err) -@@ -163,7 +166,7 @@ func (q *QuerierAPI) TailHandler(w http.ResponseWriter, r *http.Request) { - } - }() - -- tailer, err := q.querier.Tail(r.Context(), req) -+ tailer, err := q.querier.Tail(r.Context(), req, encodingFlags.Has(httpreq.FlagCategorizeLabels)) - if err != nil { - if err := conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, err.Error())); err != nil { - level.Error(logger).Log(""msg"", ""Error connecting to ingesters for tailing"", ""err"", err) -@@ -179,6 +182,8 @@ func (q *QuerierAPI) TailHandler(w http.ResponseWriter, r *http.Request) { - ticker := time.NewTicker(wsPingPeriod) - defer ticker.Stop() - -+ connWriter := marshal.NewWebsocketJSONWriter(conn) -+ - var response *loghttp_legacy.TailResponse - responseChan := tailer.getResponseChan() - closeErrChan := tailer.getCloseErrorChan() -@@ -209,8 +214,8 @@ func (q *QuerierAPI) TailHandler(w 
http.ResponseWriter, r *http.Request) { - select { - case response = <-responseChan: - var err error -- if loghttp.GetVersion(r.RequestURI) == loghttp.VersionV1 { -- err = marshal.WriteTailResponseJSON(*response, conn) -+ if version == loghttp.VersionV1 { -+ err = marshal.WriteTailResponseJSON(*response, connWriter, encodingFlags) - } else { - err = marshal_legacy.WriteTailResponseJSON(*response, conn) - } -diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go -index 8295f02c644c0..23e2c89f5e2b0 100644 ---- a/pkg/querier/querier.go -+++ b/pkg/querier/querier.go -@@ -88,7 +88,7 @@ type Querier interface { - logql.Querier - Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) - Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) -- Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, error) -+ Tail(ctx context.Context, req *logproto.TailRequest, categorizedLabels bool) (*Tailer, error) - IndexStats(ctx context.Context, req *loghttp.RangeQuery) (*stats.Stats, error) - Volume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) - } -@@ -434,7 +434,7 @@ func (*SingleTenantQuerier) Check(_ context.Context, _ *grpc_health_v1.HealthChe - } - - // Tail keeps getting matching logs from all ingesters for given query --func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, error) { -+func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailRequest, categorizedLabels bool) (*Tailer, error) { - err := q.checkTailRequestLimit(ctx) - if err != nil { - return nil, err -@@ -496,6 +496,7 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques - }, - q.cfg.TailMaxDuration, - tailerWaitEntryThrottle, -+ categorizedLabels, - q.metrics, - ), nil - } -diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go -index aef47440326b9..fa2de7590465f 100644 ---- a/pkg/querier/querier_mock_test.go -+++ b/pkg/querier/querier_mock_test.go -@@ -530,7 +530,7 @@ func (q *querierMock) Series(ctx context.Context, req *logproto.SeriesRequest) ( - return args.Get(0).(func() *logproto.SeriesResponse)(), args.Error(1) - } - --func (q *querierMock) Tail(_ context.Context, _ *logproto.TailRequest) (*Tailer, error) { -+func (q *querierMock) Tail(_ context.Context, _ *logproto.TailRequest, _ bool) (*Tailer, error) { - return nil, errors.New(""querierMock.Tail() has not been mocked"") - } - -diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go -index c585a5386c55c..2de92994256dd 100644 ---- a/pkg/querier/querier_test.go -+++ b/pkg/querier/querier_test.go -@@ -118,7 +118,7 @@ func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) { - require.NoError(t, err) - - ctx := user.InjectOrgID(context.Background(), ""test"") -- _, err = q.Tail(ctx, &request) -+ _, err = q.Tail(ctx, &request, false) - require.NoError(t, err) - - calls := ingesterClient.GetMockedCallsByMethod(""Query"") -@@ -512,7 +512,7 @@ func TestQuerier_concurrentTailLimits(t *testing.T) { - require.NoError(t, err) - - ctx := user.InjectOrgID(context.Background(), ""test"") -- _, err = q.Tail(ctx, &request) -+ _, err = q.Tail(ctx, &request, false) - assert.Equal(t, testData.expectedError, err) - }) - } -diff --git a/pkg/querier/tail.go b/pkg/querier/tail.go -index 09e785a13b3e6..bccedfb7c532e 100644 ---- a/pkg/querier/tail.go -+++ b/pkg/querier/tail.go -@@ -50,11 +50,12 @@ type Tailer struct { - querierTailClients 
map[string]logproto.Querier_TailClient // addr -> grpc clients for tailing logs from ingesters - querierTailClientsMtx sync.RWMutex - -- stopped bool -- delayFor time.Duration -- responseChan chan *loghttp.TailResponse -- closeErrChan chan error -- tailMaxDuration time.Duration -+ stopped bool -+ delayFor time.Duration -+ responseChan chan *loghttp.TailResponse -+ closeErrChan chan error -+ tailMaxDuration time.Duration -+ categorizeLabels bool - - // if we are not seeing any response from ingester, - // how long do we want to wait by going into sleep -@@ -234,7 +235,12 @@ func (t *Tailer) pushTailResponseFromIngester(resp *logproto.TailResponse) { - t.streamMtx.Lock() - defer t.streamMtx.Unlock() - -- t.openStreamIterator.Push(iter.NewStreamIterator(*resp.Stream)) -+ itr := iter.NewStreamIterator(*resp.Stream) -+ if t.categorizeLabels { -+ itr = iter.NewCategorizeLabelsIterator(itr) -+ } -+ -+ t.openStreamIterator.Push(itr) - } - - // finds oldest entry by peeking at open stream iterator. -@@ -305,10 +311,16 @@ func newTailer( - tailDisconnectedIngesters func([]string) (map[string]logproto.Querier_TailClient, error), - tailMaxDuration time.Duration, - waitEntryThrottle time.Duration, -+ categorizeLabels bool, - m *Metrics, - ) *Tailer { -+ historicEntriesIter := historicEntries -+ if categorizeLabels { -+ historicEntriesIter = iter.NewCategorizeLabelsIterator(historicEntries) -+ } -+ - t := Tailer{ -- openStreamIterator: iter.NewMergeEntryIterator(context.Background(), []iter.EntryIterator{historicEntries}, logproto.FORWARD), -+ openStreamIterator: iter.NewMergeEntryIterator(context.Background(), []iter.EntryIterator{historicEntriesIter}, logproto.FORWARD), - querierTailClients: querierTailClients, - delayFor: delayFor, - responseChan: make(chan *loghttp.TailResponse, maxBufferedTailResponses), -@@ -317,6 +329,7 @@ func newTailer( - tailDisconnectedIngesters: tailDisconnectedIngesters, - tailMaxDuration: tailMaxDuration, - waitEntryThrottle: waitEntryThrottle, -+ categorizeLabels: categorizeLabels, - metrics: m, - } - -diff --git a/pkg/querier/tail_test.go b/pkg/querier/tail_test.go -index a2186cd614f73..32c6bed36ed2d 100644 ---- a/pkg/querier/tail_test.go -+++ b/pkg/querier/tail_test.go -@@ -5,6 +5,7 @@ import ( - ""testing"" - ""time"" - -+ ""github.com/prometheus/prometheus/model/labels"" - ""github.com/stretchr/testify/assert"" - ""github.com/stretchr/testify/require"" - -@@ -161,7 +162,7 @@ func TestTailer(t *testing.T) { - tailClients[""test""] = test.tailClient - } - -- tailer := newTailer(0, tailClients, test.historicEntries, tailDisconnectedIngesters, timeout, throttle, NewMetrics(nil)) -+ tailer := newTailer(0, tailClients, test.historicEntries, tailDisconnectedIngesters, timeout, throttle, false, NewMetrics(nil)) - defer tailer.close() - - test.tester(t, tailer, test.tailClient) -@@ -169,6 +170,214 @@ func TestTailer(t *testing.T) { - } - } - -+func TestCategorizedLabels(t *testing.T) { -+ t.Parallel() -+ -+ lbs := labels.FromStrings(""app"", ""foo"") -+ createHistoricalEntries := func() iter.EntryIterator { -+ return iter.NewStreamIterator(logproto.Stream{ -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(1, 0), -+ Line: ""foo=1"", -+ }, -+ { -+ Timestamp: time.Unix(2, 0), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }) -+ } -+ createTailClients := func() map[string]*tailClientMock { -+ return map[string]*tailClientMock{ -+ ""test1"": 
newTailClientMock().mockRecvWithTrigger(mockTailResponse(logproto.Stream{ -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(3, 0), -+ Line: ""foo=3"", -+ }, -+ }, -+ })), -+ ""test2"": newTailClientMock().mockRecvWithTrigger(mockTailResponse(logproto.Stream{ -+ Labels: labels.NewBuilder(lbs).Set(""traceID"", ""123"").Labels().String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(4, 0), -+ Line: ""foo=4"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ })), -+ ""test3"": newTailClientMock().mockRecvWithTrigger(mockTailResponse(logproto.Stream{ -+ Labels: labels.NewBuilder(lbs).Set(""traceID"", ""123"").Set(""foo"", ""5"").Labels().String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(5, 0), -+ Line: ""foo=5"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""5"")), -+ }, -+ }, -+ })), -+ } -+ } -+ -+ for _, tc := range []struct { -+ name string -+ categorizeLabels bool -+ historicEntries iter.EntryIterator -+ tailClients map[string]*tailClientMock -+ expectedStreams []logproto.Stream -+ }{ -+ { -+ name: ""without categorize"", -+ categorizeLabels: false, -+ historicEntries: createHistoricalEntries(), -+ tailClients: createTailClients(), -+ expectedStreams: []logproto.Stream{ -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(1, 0), -+ Line: ""foo=1"", -+ }, -+ }, -+ }, -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(2, 0), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }, -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(3, 0), -+ Line: ""foo=3"", -+ }, -+ }, -+ }, -+ { -+ Labels: labels.NewBuilder(lbs).Set(""traceID"", ""123"").Labels().String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(4, 0), -+ Line: ""foo=4"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }, -+ { -+ Labels: labels.NewBuilder(lbs).Set(""traceID"", ""123"").Set(""foo"", ""5"").Labels().String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(5, 0), -+ Line: ""foo=5"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""5"")), -+ }, -+ }, -+ }, -+ }, -+ }, -+ { -+ name: ""categorize"", -+ categorizeLabels: true, -+ historicEntries: createHistoricalEntries(), -+ tailClients: createTailClients(), -+ expectedStreams: []logproto.Stream{ -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(1, 0), -+ Line: ""foo=1"", -+ }, -+ }, -+ }, -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(2, 0), -+ Line: ""foo=2"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ }, -+ }, -+ }, -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(3, 0), -+ Line: ""foo=3"", -+ }, -+ }, -+ }, -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(4, 0), -+ Line: ""foo=4"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ 
}, -+ }, -+ }, -+ { -+ Labels: lbs.String(), -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(5, 0), -+ Line: ""foo=5"", -+ StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""traceID"", ""123"")), -+ Parsed: logproto.FromLabelsToLabelAdapters(labels.FromStrings(""foo"", ""5"")), -+ }, -+ }, -+ }, -+ }, -+ }, -+ } { -+ t.Run(tc.name, func(t *testing.T) { -+ tailDisconnectedIngesters := func([]string) (map[string]logproto.Querier_TailClient, error) { -+ return map[string]logproto.Querier_TailClient{}, nil -+ } -+ -+ tailClients := map[string]logproto.Querier_TailClient{} -+ for k, v := range tc.tailClients { -+ tailClients[k] = v -+ } -+ -+ tailer := newTailer(0, tailClients, tc.historicEntries, tailDisconnectedIngesters, timeout, throttle, tc.categorizeLabels, NewMetrics(nil)) -+ defer tailer.close() -+ -+ // Make tail clients receive their responses -+ for _, client := range tc.tailClients { -+ client.triggerRecv() -+ } -+ -+ err := waitUntilTailerOpenStreamsHaveBeenConsumed(tailer) -+ require.NoError(t, err) -+ -+ maxEntries := countEntriesInStreams(tc.expectedStreams) -+ responses, err := readFromTailer(tailer, maxEntries) -+ require.NoError(t, err) -+ -+ streams := flattenStreamsFromResponses(responses) -+ require.ElementsMatch(t, tc.expectedStreams, streams) -+ }) -+ } -+} -+ - func readFromTailer(tailer *Tailer, maxEntries int) ([]*loghttp.TailResponse, error) { - responses := make([]*loghttp.TailResponse, 0) - entriesCount := 0 -@@ -204,7 +413,7 @@ func waitUntilTailerOpenStreamsHaveBeenConsumed(tailer *Tailer) error { - - select { - case <-timeoutTicker.C: -- return errors.New(""timeout expired while reading responses from Tailer"") -+ return errors.New(""timeout expired while waiting for Tailer to consume open streams"") - default: - time.Sleep(throttle) - } -diff --git a/pkg/util/httpreq/encoding_flags.go b/pkg/util/httpreq/encoding_flags.go -index 89656618eb60d..232f2bc4e0f99 100644 ---- a/pkg/util/httpreq/encoding_flags.go -+++ b/pkg/util/httpreq/encoding_flags.go -@@ -71,11 +71,7 @@ func AddEncodingFlagsToContext(ctx context.Context, flags EncodingFlags) context - - func ExtractEncodingFlags(req *http.Request) EncodingFlags { - rawValue := req.Header.Get(LokiEncodingFlagsHeader) -- if rawValue == """" { -- return nil -- } -- -- return parseEncodingFlags(rawValue) -+ return ParseEncodingFlags(rawValue) - } - - func ExtractEncodingFlagsFromProto(req *httpgrpc.HTTPRequest) EncodingFlags { -@@ -83,11 +79,7 @@ func ExtractEncodingFlagsFromProto(req *httpgrpc.HTTPRequest) EncodingFlags { - for _, header := range req.GetHeaders() { - if header.GetKey() == LokiEncodingFlagsHeader { - rawValue = header.GetValues()[0] -- if rawValue == """" { -- return nil -- } -- -- return parseEncodingFlags(rawValue) -+ return ParseEncodingFlags(rawValue) - } - } - -@@ -100,10 +92,14 @@ func ExtractEncodingFlagsFromCtx(ctx context.Context) EncodingFlags { - return nil - } - -- return parseEncodingFlags(rawValue) -+ return ParseEncodingFlags(rawValue) - } - --func parseEncodingFlags(rawFlags string) EncodingFlags { -+func ParseEncodingFlags(rawFlags string) EncodingFlags { -+ if rawFlags == """" { -+ return nil -+ } -+ - split := strings.Split(rawFlags, EncodeFlagsDelimiter) - flags := make(EncodingFlags, len(split)) - for _, rawFlag := range split { -diff --git a/pkg/util/marshal/marshal.go b/pkg/util/marshal/marshal.go -index 562808b300232..fd28907d0579a 100644 ---- a/pkg/util/marshal/marshal.go -+++ b/pkg/util/marshal/marshal.go -@@ -81,18 +81,38 @@ type 
WebsocketWriter interface { - WriteMessage(int, []byte) error - } - --// WriteTailResponseJSON marshals the legacy.TailResponse to v1 loghttp JSON and --// then writes it to the provided connection. --func WriteTailResponseJSON(r legacy.TailResponse, c WebsocketWriter) error { -- v1Response, err := NewTailResponse(r) -+type websocketJSONWriter struct { -+ WebsocketWriter -+} -+ -+func (w *websocketJSONWriter) Write(p []byte) (n int, err error) { -+ err = w.WriteMessage(websocket.TextMessage, p) - if err != nil { -- return err -+ return 0, err - } -- data, err := jsoniter.Marshal(v1Response) -+ return len(p), nil -+} -+ -+func NewWebsocketJSONWriter(ws WebsocketWriter) io.Writer { -+ return &websocketJSONWriter{ws} -+} -+ -+// WriteTailResponseJSON marshals the legacy.TailResponse to v1 loghttp JSON and -+// then writes it to the provided writer. -+func WriteTailResponseJSON(r legacy.TailResponse, w io.Writer, encodeFlags httpreq.EncodingFlags) error { -+ // TODO(salvacorts): I think we can dismiss the new TailResponse and be an alias of legacy.TailResponse -+ // v1Response, err := NewTailResponse(r) -+ // if err != nil { -+ // return err -+ // } -+ s := jsoniter.ConfigFastest.BorrowStream(w) -+ defer jsoniter.ConfigFastest.ReturnStream(s) -+ -+ err := EncodeTailResult(r, s, encodeFlags) - if err != nil { -- return err -+ return fmt.Errorf(""could not write JSON tail response: %w"", err) - } -- return c.WriteMessage(websocket.TextMessage, data) -+ return s.Flush() - } - - // WriteSeriesResponseJSON marshals a logproto.SeriesResponse to v1 loghttp JSON and then -diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go -index 070f7b0ed4012..fa8cc5d8aa3e5 100644 ---- a/pkg/util/marshal/marshal_test.go -+++ b/pkg/util/marshal/marshal_test.go -@@ -410,7 +410,6 @@ var labelTests = []struct { - } - - // covers responses from /loki/api/v1/tail --// TODO(salvacorts): Support encoding flags. And fix serialized structured metadata labels which shouldn't be there unless the categorize flag is set. 
- var tailTests = []struct { - actual legacy.TailResponse - expected string -@@ -451,7 +450,7 @@ var tailTests = []struct { - }, - ""values"":[ - [ ""123456789012345"", ""super line""], -- [ ""123456789012346"", ""super line with labels"", { ""foo"": ""a"", ""bar"": ""b"" } ] -+ [ ""123456789012346"", ""super line with labels"" ] - ] - } - ], -@@ -467,6 +466,90 @@ var tailTests = []struct { - }, - } - -+var tailTestWithEncodingFlags = []struct { -+ actual legacy.TailResponse -+ encodingFlags httpreq.EncodingFlags -+ expected string -+}{ -+ { -+ actual: legacy.TailResponse{ -+ Streams: []logproto.Stream{ -+ { -+ Entries: []logproto.Entry{ -+ { -+ Timestamp: time.Unix(0, 123456789012345), -+ Line: ""super line"", -+ }, -+ { -+ Timestamp: time.Unix(0, 123456789012346), -+ Line: ""super line with labels"", -+ StructuredMetadata: []logproto.LabelAdapter{ -+ {Name: ""foo"", Value: ""a""}, -+ {Name: ""bar"", Value: ""b""}, -+ }, -+ }, -+ { -+ Timestamp: time.Unix(0, 123456789012347), -+ Line: ""super line with labels msg=text"", -+ StructuredMetadata: []logproto.LabelAdapter{ -+ {Name: ""foo"", Value: ""a""}, -+ {Name: ""bar"", Value: ""b""}, -+ }, -+ Parsed: []logproto.LabelAdapter{ -+ {Name: ""msg"", Value: ""text""}, -+ }, -+ }, -+ }, -+ Labels: `{test=""test""}`, -+ }, -+ }, -+ DroppedEntries: []legacy.DroppedEntry{ -+ { -+ Timestamp: time.Unix(0, 123456789022345), -+ Labels: ""{test=\""test\""}"", -+ }, -+ }, -+ }, -+ encodingFlags: httpreq.NewEncodingFlags(httpreq.FlagCategorizeLabels), -+ expected: fmt.Sprintf(`{ -+ ""streams"": [ -+ { -+ ""stream"": { -+ ""test"": ""test"" -+ }, -+ ""values"":[ -+ [ ""123456789012345"", ""super line""], -+ [ ""123456789012346"", ""super line with labels"", { -+ ""structuredMetadata"": { -+ ""foo"": ""a"", -+ ""bar"": ""b"" -+ } -+ }], -+ [ ""123456789012347"", ""super line with labels msg=text"", { -+ ""structuredMetadata"": { -+ ""foo"": ""a"", -+ ""bar"": ""b"" -+ }, -+ ""parsed"": { -+ ""msg"": ""text"" -+ } -+ }] -+ ] -+ } -+ ], -+ ""dropped_entries"": [ -+ { -+ ""timestamp"": ""123456789022345"", -+ ""labels"": { -+ ""test"": ""test"" -+ } -+ } -+ ], -+ ""encodingFlags"": [""%s""] -+ }`, httpreq.FlagCategorizeLabels), -+ }, -+} -+ - func Test_WriteQueryResponseJSON(t *testing.T) { - for i, queryTest := range queryTests { - var b bytes.Buffer -@@ -515,15 +598,18 @@ func Test_WriteQueryResponseJSONWithError(t *testing.T) { - - func Test_MarshalTailResponse(t *testing.T) { - for i, tailTest := range tailTests { -- // convert logproto to model objects -- model, err := NewTailResponse(tailTest.actual) -+ var b bytes.Buffer -+ err := WriteTailResponseJSON(tailTest.actual, &b, nil) - require.NoError(t, err) - -- // marshal model object -- bytes, err := json.Marshal(model) -+ require.JSONEqf(t, tailTest.expected, b.String(), ""Tail Test %d failed"", i) -+ } -+ for i, tailTest := range tailTestWithEncodingFlags { -+ var b bytes.Buffer -+ err := WriteTailResponseJSON(tailTest.actual, &b, tailTest.encodingFlags) - require.NoError(t, err) - -- require.JSONEqf(t, tailTest.expected, string(bytes), ""Tail Test %d failed"", i) -+ require.JSONEqf(t, tailTest.expected, b.String(), ""Tail Test %d failed"", i) - } - } - -@@ -925,10 +1011,11 @@ func Test_WriteTailResponseJSON(t *testing.T) { - {Timestamp: time.Unix(0, 2), Labels: `{app=""dropped""}`}, - }, - }, -- WebsocketWriterFunc(func(i int, b []byte) error { -+ NewWebsocketJSONWriter(WebsocketWriterFunc(func(i int, b []byte) error { - require.Equal(t, 
`{""streams"":[{""stream"":{""app"":""foo""},""values"":[[""1"",""foobar""]]}],""dropped_entries"":[{""timestamp"":""2"",""labels"":{""app"":""dropped""}}]}`, string(b)) - return nil -- }), -+ })), -+ nil, - ), - ) - } -diff --git a/pkg/util/marshal/query.go b/pkg/util/marshal/query.go -index fb6aead8a76ee..b048b0a952f87 100644 ---- a/pkg/util/marshal/query.go -+++ b/pkg/util/marshal/query.go -@@ -13,6 +13,7 @@ import ( - ""github.com/prometheus/prometheus/promql/parser"" - - ""github.com/grafana/loki/pkg/loghttp"" -+ legacy ""github.com/grafana/loki/pkg/loghttp/legacy"" - ""github.com/grafana/loki/pkg/logproto"" - ""github.com/grafana/loki/pkg/logqlmodel"" - ""github.com/grafana/loki/pkg/logqlmodel/stats"" -@@ -191,6 +192,60 @@ func EncodeResult(data parser.Value, statistics stats.Result, s *jsoniter.Stream - return nil - } - -+func EncodeTailResult(data legacy.TailResponse, s *jsoniter.Stream, encodeFlags httpreq.EncodingFlags) error { -+ s.WriteObjectStart() -+ s.WriteObjectField(""streams"") -+ err := encodeStreams(data.Streams, s, encodeFlags) -+ if err != nil { -+ return err -+ } -+ -+ if len(data.DroppedEntries) > 0 { -+ s.WriteMore() -+ s.WriteObjectField(""dropped_entries"") -+ err = encodeDroppedEntries(data.DroppedEntries, s) -+ if err != nil { -+ return err -+ } -+ } -+ -+ if len(encodeFlags) > 0 { -+ s.WriteMore() -+ s.WriteObjectField(""encodingFlags"") -+ if err := encodeEncodingFlags(s, encodeFlags); err != nil { -+ return err -+ } -+ } -+ -+ s.WriteObjectEnd() -+ return nil -+} -+ -+func encodeDroppedEntries(entries []legacy.DroppedEntry, s *jsoniter.Stream) error { -+ s.WriteArrayStart() -+ defer s.WriteArrayEnd() -+ -+ for i, entry := range entries { -+ if i > 0 { -+ s.WriteMore() -+ } -+ -+ ds, err := NewDroppedStream(&entry) -+ if err != nil { -+ return err -+ } -+ -+ jsonEntry, err := ds.MarshalJSON() -+ if err != nil { -+ return err -+ } -+ -+ s.WriteRaw(string(jsonEntry)) -+ } -+ -+ return nil -+} -+ - func encodeEncodingFlags(s *jsoniter.Stream, flags httpreq.EncodingFlags) error { - s.WriteArrayStart() - defer s.WriteArrayEnd() -@@ -329,7 +384,6 @@ func encodeStream(stream logproto.Stream, s *jsoniter.Stream, encodeFlags httpre - encodeLabels(logproto.FromLabelsToLabelAdapters(lbls), s) - - s.WriteObjectEnd() -- s.Flush() - - s.WriteMore() - s.WriteObjectField(""values"") -@@ -373,8 +427,6 @@ func encodeStream(stream logproto.Stream, s *jsoniter.Stream, encodeFlags httpre - s.WriteObjectEnd() - } - s.WriteArrayEnd() -- -- s.Flush() - } - - s.WriteArrayEnd() -diff --git a/pkg/util/marshal/tail.go b/pkg/util/marshal/tail.go -index 5655aee09c288..222b76c046b7d 100644 ---- a/pkg/util/marshal/tail.go -+++ b/pkg/util/marshal/tail.go -@@ -5,32 +5,6 @@ import ( - legacy ""github.com/grafana/loki/pkg/loghttp/legacy"" - ) - --// NewTailResponse constructs a TailResponse from a legacy.TailResponse --func NewTailResponse(r legacy.TailResponse) (loghttp.TailResponse, error) { -- var err error -- ret := loghttp.TailResponse{ -- Streams: make([]loghttp.Stream, len(r.Streams)), -- DroppedStreams: make([]loghttp.DroppedStream, len(r.DroppedEntries)), -- } -- -- for i, s := range r.Streams { -- ret.Streams[i], err = NewStream(s) -- -- if err != nil { -- return loghttp.TailResponse{}, err -- } -- } -- -- for i, d := range r.DroppedEntries { -- ret.DroppedStreams[i], err = NewDroppedStream(&d) -- if err != nil { -- return loghttp.TailResponse{}, err -- } -- } -- -- return ret, nil --} -- - // NewDroppedStream constructs a DroppedStream from a legacy.DroppedEntry - func 
NewDroppedStream(s *legacy.DroppedEntry) (loghttp.DroppedStream, error) { - l, err := NewLabelSet(s.Labels) -diff --git a/pkg/util/unmarshal/unmarshal_test.go b/pkg/util/unmarshal/unmarshal_test.go -index 9fdaf27512127..93372f62ebef1 100644 ---- a/pkg/util/unmarshal/unmarshal_test.go -+++ b/pkg/util/unmarshal/unmarshal_test.go -@@ -224,6 +224,7 @@ func (ws *websocket) ReadMessage() (int, []byte, error) { - - func Test_ReadTailResponse(t *testing.T) { - ws := &websocket{} -+ wsJSON := marshal.NewWebsocketJSONWriter(ws) - require.NoError(t, marshal.WriteTailResponseJSON(legacy_loghttp.TailResponse{ - Streams: []logproto.Stream{ - {Labels: `{app=""bar""}`, Entries: []logproto.Entry{{Timestamp: time.Unix(0, 2), Line: ""2""}}}, -@@ -231,7 +232,7 @@ func Test_ReadTailResponse(t *testing.T) { - DroppedEntries: []legacy_loghttp.DroppedEntry{ - {Timestamp: time.Unix(0, 1), Labels: `{app=""foo""}`}, - }, -- }, ws)) -+ }, wsJSON, nil)) - res := &loghttp.TailResponse{} - require.NoError(t, ReadTailResponseJSON(res, ws))",unknown,"Support categorized labels in Tailing (#11079) - -**What this PR does / why we need it**: - -This is a follow-up PR for https://github.com/grafana/loki/pull/10419 -adding support for tailing. - -I tested it on a dev cell and works fine. - - - -**Note**: With these changes, the JSON marshal unmarshal functions for -the tail are no longer used ([example][1]) so I think we can remove -them. Also, the new Tail response is no longer used, so we can also make -it an alias to the _legacy_ one. Let's do it on a follow-up PR to avoid -making this one bigger. - -[1]: -https://github.com/grafana/loki/blob/52a3f16039dd5ff655fc3681257d99794f620ec4/pkg/loghttp/entry.go#L210-L238" -5ea38fbd51a075ac6664c81d507b71abb9de722e,2022-09-02 23:48:00,Ashwanth,"tsdb: build rotated heads asynchronously (#6956) - - - -**What this PR does / why we need it**: -Currently `Append` call to the head has to wait for block creation to complete when rotating head. -And since we rotate head every 15m (default period), this would result in latency spikes at these times. -This PR moves the TSDB build step to an async routine. - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - - -**Checklist** -- [ ] Documentation added -- [ ] Tests updated -- [ ] Is this an important fix or new feature? Add an entry in the `CHANGELOG.md`. 
-- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/upgrading/_index.md` - -Signed-off-by: Ashwanth Goli -Co-authored-by: Owen Diehl ",False,"diff --git a/pkg/storage/stores/tsdb/head.go b/pkg/storage/stores/tsdb/head.go -index 19e79ec767253..5a9290484549d 100644 ---- a/pkg/storage/stores/tsdb/head.go -+++ b/pkg/storage/stores/tsdb/head.go -@@ -61,6 +61,8 @@ type Metrics struct { - tsdbCreationFailures prometheus.Counter - tsdbManagerUpdatesTotal prometheus.Counter - tsdbManagerUpdatesFailedTotal prometheus.Counter -+ tsdbHeadRotationsTotal prometheus.Counter -+ tsdbHeadRotationsFailedTotal prometheus.Counter - } - - func NewMetrics(r prometheus.Registerer) *Metrics { -@@ -85,6 +87,14 @@ func NewMetrics(r prometheus.Registerer) *Metrics { - Name: ""loki_tsdb_manager_updates_failed_total"", - Help: ""Total number of tsdb manager update failures (loading/rotating tsdbs in mem)"", - }), -+ tsdbHeadRotationsTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ -+ Name: ""loki_tsdb_head_rotations_total"", -+ Help: ""Total number of tsdb head rotations"", -+ }), -+ tsdbHeadRotationsFailedTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ -+ Name: ""loki_tsdb_head_rotations_failed_total"", -+ Help: ""Total number of tsdb head rotations failed"", -+ }), - } - } - -diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go -index a9101e924ddba..22ab092f2dcb6 100644 ---- a/pkg/storage/stores/tsdb/head_manager.go -+++ b/pkg/storage/stores/tsdb/head_manager.go -@@ -39,7 +39,11 @@ which we use in file creation/etc. - */ - type period time.Duration - --const defaultRotationPeriod = period(15 * time.Minute) -+const ( -+ defaultRotationPeriod = period(15 * time.Minute) -+ // defines the period to check for active head rotation -+ defaultRotationCheckPeriod = 1 * time.Minute -+) - - func (p period) PeriodFor(t time.Time) int { - return int(t.UnixNano() / int64(p)) -@@ -101,6 +105,9 @@ type HeadManager struct { - activeHeads, prevHeads *tenantHeads - - Index -+ -+ wg sync.WaitGroup -+ cancel chan struct{} - } - - func NewHeadManager(logger log.Logger, dir string, metrics *Metrics, tsdbManager TSDBManager) *HeadManager { -@@ -113,6 +120,8 @@ func NewHeadManager(logger log.Logger, dir string, metrics *Metrics, tsdbManager - - period: defaultRotationPeriod, - shards: shards, -+ -+ cancel: make(chan struct{}), - } - - m.Index = LazyIndex(func() (Index, error) { -@@ -134,7 +143,73 @@ func NewHeadManager(logger log.Logger, dir string, metrics *Metrics, tsdbManager - return m - } - -+func (m *HeadManager) loop() { -+ defer m.wg.Done() -+ -+ buildPrev := func() error { -+ if m.prev == nil { -+ return nil -+ } -+ -+ if err := m.buildTSDBFromWAL(m.prev.initialized); err != nil { -+ return errors.Wrap(err, ""building tsdb head"") -+ } -+ -+ // Now that the tsdbManager has the updated TSDBs, we can remove our references -+ m.mtx.Lock() -+ defer m.mtx.Unlock() -+ m.prevHeads = nil -+ m.prev = nil -+ -+ return nil -+ } -+ -+ ticker := time.NewTicker(defaultRotationCheckPeriod) -+ defer ticker.Stop() -+ -+ for { -+ select { -+ case <-ticker.C: -+ // retry tsdb build failures from previous run -+ if err := buildPrev(); err != nil { -+ level.Error(m.log).Log( -+ ""msg"", ""failed building tsdb head"", -+ ""period"", m.period.PeriodFor(m.prev.initialized), -+ ""err"", err, -+ ) -+ // rotating head without building prev would result in loss of index for that period (until restart) -+ continue -+ } -+ -+ now := time.Now() -+ if 
m.period.PeriodFor(now) > m.period.PeriodFor(m.activeHeads.start) { -+ if err := m.Rotate(now); err != nil { -+ level.Error(m.log).Log( -+ ""msg"", ""failed rotating tsdb head"", -+ ""period"", m.period.PeriodFor(m.prev.initialized), -+ ""err"", err, -+ ) -+ continue -+ } -+ } -+ -+ // build tsdb from rotated-out period -+ if err := buildPrev(); err != nil { -+ level.Error(m.log).Log( -+ ""msg"", ""failed building tsdb head"", -+ ""err"", err, -+ ) -+ } -+ case <-m.cancel: -+ return -+ } -+ } -+} -+ - func (m *HeadManager) Stop() error { -+ close(m.cancel) -+ m.wg.Wait() -+ - m.mtx.Lock() - defer m.mtx.Unlock() - if err := m.active.Stop(); err != nil { -@@ -153,15 +228,8 @@ func (m *HeadManager) Append(userID string, ls labels.Labels, chks index.ChunkMe - ls = b.Labels() - - m.mtx.RLock() -- now := time.Now() -- if m.period.PeriodFor(now) > m.period.PeriodFor(m.activeHeads.start) { -- m.mtx.RUnlock() -- if err := m.Rotate(now); err != nil { -- return errors.Wrap(err, ""rotating TSDB Head"") -- } -- m.mtx.RLock() -- } - defer m.mtx.RUnlock() -+ - rec := m.activeHeads.Append(userID, ls, chks) - return m.active.Log(rec) - } -@@ -206,7 +274,15 @@ func (m *HeadManager) Start() error { - return errors.New(""cleaning (removing) wal dir"") - } - -- return m.Rotate(now) -+ err = m.Rotate(now) -+ if err != nil { -+ return errors.Wrap(err, ""rotating tsdb head"") -+ } -+ -+ m.wg.Add(1) -+ go m.loop() -+ -+ return nil - } - - func managerRequiredDirs(parent string) []string { -@@ -233,14 +309,13 @@ func managerPerTenantDir(parent string) string { - return filepath.Join(parent, ""per_tenant"") - } - --func (m *HeadManager) Rotate(t time.Time) error { -- m.mtx.Lock() -- defer m.mtx.Unlock() -- -- if m.activeHeads != nil && m.period.PeriodFor(t) == m.period.PeriodFor(m.activeHeads.start) { -- // no-op, we've already rotated to the desired period -- return nil -- } -+func (m *HeadManager) Rotate(t time.Time) (err error) { -+ defer func() { -+ m.metrics.tsdbHeadRotationsTotal.Inc() -+ if err != nil { -+ m.metrics.tsdbHeadRotationsFailedTotal.Inc() -+ } -+ }() - - // create new wal - nextWALPath := walPath(m.dir, t) -@@ -252,37 +327,25 @@ func (m *HeadManager) Rotate(t time.Time) error { - // create new tenant heads - nextHeads := newTenantHeads(t, m.shards, m.metrics, m.log) - -- stopPrev := func(s string) { -- if m.prev != nil { -- if err := m.prev.Stop(); err != nil { -- level.Error(m.log).Log( -- ""msg"", ""failed stopping wal"", -- ""period"", m.period.PeriodFor(m.prev.initialized), -- ""err"", err, -- ""wal"", s, -- ) -- } -- } -- } -+ m.mtx.Lock() -+ defer m.mtx.Unlock() - -- stopPrev(""previous cycle"") // stop the previous wal if it hasn't been cleaned up yet - m.prev = m.active - m.prevHeads = m.activeHeads - m.active = nextWAL - m.activeHeads = nextHeads -- stopPrev(""freshly rotated"") // stop the newly rotated-out wal - -- // build tsdb from rotated-out period -- // TODO(owen-d): don't block Append() waiting for tsdb building. 
Use a work channel/etc -+ // stop the newly rotated-out wal - if m.prev != nil { -- if err := m.buildTSDBFromWAL(m.prev.initialized); err != nil { -- return errors.Wrap(err, ""building tsdb from rotated out period"") -+ if err := m.prev.Stop(); err != nil { -+ level.Error(m.log).Log( -+ ""msg"", ""failed stopping wal"", -+ ""period"", m.period.PeriodFor(m.prev.initialized), -+ ""err"", err, -+ ) - } - } - -- // Now that the tsdbManager has the updated TSDBs, we can remove our references -- m.prevHeads = nil -- m.prev = nil - return nil - } - -diff --git a/pkg/storage/stores/tsdb/head_manager_test.go b/pkg/storage/stores/tsdb/head_manager_test.go -index e778bdc423587..4739abd46c76d 100644 ---- a/pkg/storage/stores/tsdb/head_manager_test.go -+++ b/pkg/storage/stores/tsdb/head_manager_test.go -@@ -166,7 +166,7 @@ func Test_HeadManager_RecoverHead(t *testing.T) { - }, - } - -- mgr := NewHeadManager(log.NewNopLogger(), dir, nil, newNoopTSDBManager(dir)) -+ mgr := NewHeadManager(log.NewNopLogger(), dir, NewMetrics(nil), newNoopTSDBManager(dir)) - // This bit is normally handled by the Start() fn, but we're testing a smaller surface area - // so ensure our dirs exist - for _, d := range managerRequiredDirs(dir) { -@@ -249,7 +249,7 @@ func Test_HeadManager_Lifecycle(t *testing.T) { - }, - } - -- mgr := NewHeadManager(log.NewNopLogger(), dir, nil, newNoopTSDBManager(dir)) -+ mgr := NewHeadManager(log.NewNopLogger(), dir, NewMetrics(nil), newNoopTSDBManager(dir)) - w, err := newHeadWAL(log.NewNopLogger(), walPath(mgr.dir, curPeriod), curPeriod) - require.Nil(t, err)",tsdb,"build rotated heads asynchronously (#6956) - - - -**What this PR does / why we need it**: -Currently `Append` call to the head has to wait for block creation to complete when rotating head. -And since we rotate head every 15m (default period), this would result in latency spikes at these times. -This PR moves the TSDB build step to an async routine. - -**Which issue(s) this PR fixes**: -Fixes # - -**Special notes for your reviewer**: - - -**Checklist** -- [ ] Documentation added -- [ ] Tests updated -- [ ] Is this an important fix or new feature? Add an entry in the `CHANGELOG.md`. -- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/upgrading/_index.md` - -Signed-off-by: Ashwanth Goli -Co-authored-by: Owen Diehl " -d8305ecd0a20dc530fcb073d8372c5c53449aa43,2022-01-03 20:19:27,Shardul Srivastava,Add doc for limit param max_concurrent_tail_requests (#5011),False,"diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md -index 87d21a4dd21ea..a2aa6aa63e583 100644 ---- a/docs/sources/configuration/_index.md -+++ b/docs/sources/configuration/_index.md -@@ -2029,6 +2029,10 @@ The `limits_config` block configures global and per-tenant limits in Loki. - # CLI flag: -querier.max-streams-matcher-per-query - [max_streams_matchers_per_query: | default = 1000] - -+# Maximum number of concurrent tail requests. -+# CLI flag: -querier.max-concurrent-tail-requests -+[max_concurrent_tail_requests: | default = 10] -+ - # Duration to delay the evaluation of rules to ensure. - # CLI flag: -ruler.evaluation-delay-duration - [ruler_evaluation_delay_duration: | default = 0s]",unknown,Add doc for limit param max_concurrent_tail_requests (#5011) +version https://git-lfs.github.com/spec/v1 +oid sha256:b8f0096518b347268de012dae5c13416d99807ba81ad0e2331a600b56b8b6124 +size 71804266